1 /*
2 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_mount.h"
28 #include "xfs_bmap_btree.h"
29 #include "xfs_alloc_btree.h"
30 #include "xfs_ialloc_btree.h"
31 #include "xfs_dinode.h"
32 #include "xfs_inode.h"
33 #include "xfs_btree.h"
34 #include "xfs_alloc.h"
35 #include "xfs_error.h"
36 #include "xfs_trace.h"
37
38 struct workqueue_struct *xfs_alloc_wq;
39
40 #define XFS_ABSDIFF(a,b) (((a) <= (b)) ? ((b) - (a)) : ((a) - (b)))
41
42 #define XFSA_FIXUP_BNO_OK 1
43 #define XFSA_FIXUP_CNT_OK 2
44
45 STATIC int xfs_alloc_ag_vextent_exact(xfs_alloc_arg_t *);
46 STATIC int xfs_alloc_ag_vextent_near(xfs_alloc_arg_t *);
47 STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
48 STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
49 xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
50 STATIC void xfs_alloc_busy_trim(struct xfs_alloc_arg *,
51 xfs_agblock_t, xfs_extlen_t, xfs_agblock_t *, xfs_extlen_t *);
52
53 /*
54 * Lookup the record equal to [bno, len] in the btree given by cur.
55 */
56 STATIC int /* error */
57 xfs_alloc_lookup_eq(
58 struct xfs_btree_cur *cur, /* btree cursor */
59 xfs_agblock_t bno, /* starting block of extent */
60 xfs_extlen_t len, /* length of extent */
61 int *stat) /* success/failure */
62 {
63 cur->bc_rec.a.ar_startblock = bno;
64 cur->bc_rec.a.ar_blockcount = len;
65 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
66 }
67
68 /*
69 * Lookup the first record greater than or equal to [bno, len]
70 * in the btree given by cur.
71 */
72 int /* error */
73 xfs_alloc_lookup_ge(
74 struct xfs_btree_cur *cur, /* btree cursor */
75 xfs_agblock_t bno, /* starting block of extent */
76 xfs_extlen_t len, /* length of extent */
77 int *stat) /* success/failure */
78 {
79 cur->bc_rec.a.ar_startblock = bno;
80 cur->bc_rec.a.ar_blockcount = len;
81 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
82 }
83
84 /*
85 * Lookup the first record less than or equal to [bno, len]
86 * in the btree given by cur.
87 */
88 int /* error */
89 xfs_alloc_lookup_le(
90 struct xfs_btree_cur *cur, /* btree cursor */
91 xfs_agblock_t bno, /* starting block of extent */
92 xfs_extlen_t len, /* length of extent */
93 int *stat) /* success/failure */
94 {
95 cur->bc_rec.a.ar_startblock = bno;
96 cur->bc_rec.a.ar_blockcount = len;
97 return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
98 }
99
100 /*
101 * Update the record referred to by cur to the value given
102 * by [bno, len].
103 * This either works (return 0) or gets an EFSCORRUPTED error.
104 */
105 STATIC int /* error */
106 xfs_alloc_update(
107 struct xfs_btree_cur *cur, /* btree cursor */
108 xfs_agblock_t bno, /* starting block of extent */
109 xfs_extlen_t len) /* length of extent */
110 {
111 union xfs_btree_rec rec;
112
113 rec.alloc.ar_startblock = cpu_to_be32(bno);
114 rec.alloc.ar_blockcount = cpu_to_be32(len);
115 return xfs_btree_update(cur, &rec);
116 }
117
118 /*
119 * Get the data from the pointed-to record.
120 */
121 int /* error */
122 xfs_alloc_get_rec(
123 struct xfs_btree_cur *cur, /* btree cursor */
124 xfs_agblock_t *bno, /* output: starting block of extent */
125 xfs_extlen_t *len, /* output: length of extent */
126 int *stat) /* output: success/failure */
127 {
128 union xfs_btree_rec *rec;
129 int error;
130
131 error = xfs_btree_get_rec(cur, &rec, stat);
132 if (!error && *stat == 1) {
133 *bno = be32_to_cpu(rec->alloc.ar_startblock);
134 *len = be32_to_cpu(rec->alloc.ar_blockcount);
135 }
136 return error;
137 }
138
139 /*
140 * Compute aligned version of the found extent.
141 * Takes alignment and min length into account.
142 */
143 STATIC void
144 xfs_alloc_compute_aligned(
145 xfs_alloc_arg_t *args, /* allocation argument structure */
146 xfs_agblock_t foundbno, /* starting block in found extent */
147 xfs_extlen_t foundlen, /* length in found extent */
148 xfs_agblock_t *resbno, /* result block number */
149 xfs_extlen_t *reslen) /* result length */
150 {
151 xfs_agblock_t bno;
152 xfs_extlen_t len;
153
154 /* Trim busy sections out of found extent */
155 xfs_alloc_busy_trim(args, foundbno, foundlen, &bno, &len);
156
157 if (args->alignment > 1 && len >= args->minlen) {
158 xfs_agblock_t aligned_bno = roundup(bno, args->alignment);
159 xfs_extlen_t diff = aligned_bno - bno;
160
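/*
 * Rounding the start up to the alignment eats into the usable length
 * by the same amount; if that consumes the whole trimmed extent,
 * report a zero length so the caller skips it.
 */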
161 *resbno = aligned_bno;
162 *reslen = diff >= len ? 0 : len - diff;
163 } else {
164 *resbno = bno;
165 *reslen = len;
166 }
167 }
168
169 /*
170 * Compute best start block and diff for "near" allocations.
171 * freelen >= wantlen already checked by caller.
172 */
173 STATIC xfs_extlen_t /* difference value (absolute) */
174 xfs_alloc_compute_diff(
175 xfs_agblock_t wantbno, /* target starting block */
176 xfs_extlen_t wantlen, /* target length */
177 xfs_extlen_t alignment, /* target alignment */
178 xfs_agblock_t freebno, /* freespace's starting block */
179 xfs_extlen_t freelen, /* freespace's length */
180 xfs_agblock_t *newbnop) /* result: best start block from free */
181 {
182 xfs_agblock_t freeend; /* end of freespace extent */
183 xfs_agblock_t newbno1; /* return block number */
184 xfs_agblock_t newbno2; /* other new block number */
185 xfs_extlen_t newlen1=0; /* length with newbno1 */
186 xfs_extlen_t newlen2=0; /* length with newbno2 */
187 xfs_agblock_t wantend; /* end of target extent */
188
189 ASSERT(freelen >= wantlen);
190 freeend = freebno + freelen;
191 wantend = wantbno + wantlen;
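/*
 * Three situations follow: the free extent starts at or after the
 * target (use its aligned start); the free extent reaches past the
 * end of the target (use wantbno, or with alignment try the aligned
 * candidates on either side of it and keep the better one); or the
 * free extent ends short of the target end (allocate as close to
 * its end as the alignment allows).
 */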
192 if (freebno >= wantbno) {
193 if ((newbno1 = roundup(freebno, alignment)) >= freeend)
194 newbno1 = NULLAGBLOCK;
195 } else if (freeend >= wantend && alignment > 1) {
196 newbno1 = roundup(wantbno, alignment);
197 newbno2 = newbno1 - alignment;
198 if (newbno1 >= freeend)
199 newbno1 = NULLAGBLOCK;
200 else
201 newlen1 = XFS_EXTLEN_MIN(wantlen, freeend - newbno1);
202 if (newbno2 < freebno)
203 newbno2 = NULLAGBLOCK;
204 else
205 newlen2 = XFS_EXTLEN_MIN(wantlen, freeend - newbno2);
206 if (newbno1 != NULLAGBLOCK && newbno2 != NULLAGBLOCK) {
207 if (newlen1 < newlen2 ||
208 (newlen1 == newlen2 &&
209 XFS_ABSDIFF(newbno1, wantbno) >
210 XFS_ABSDIFF(newbno2, wantbno)))
211 newbno1 = newbno2;
212 } else if (newbno2 != NULLAGBLOCK)
213 newbno1 = newbno2;
214 } else if (freeend >= wantend) {
215 newbno1 = wantbno;
216 } else if (alignment > 1) {
217 newbno1 = roundup(freeend - wantlen, alignment);
218 if (newbno1 > freeend - wantlen &&
219 newbno1 - alignment >= freebno)
220 newbno1 -= alignment;
221 else if (newbno1 >= freeend)
222 newbno1 = NULLAGBLOCK;
223 } else
224 newbno1 = freeend - wantlen;
225 *newbnop = newbno1;
226 return newbno1 == NULLAGBLOCK ? 0 : XFS_ABSDIFF(newbno1, wantbno);
227 }
228
229 /*
230 * Fix up the length, based on mod and prod.
231 * len should be k * prod + mod for some k.
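 * For example, with prod == 4 and mod == 0, a 10 block result is
 * trimmed back to 8 blocks, provided that still satisfies minlen.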
232 * If len is too small it is returned unchanged.
233 * If len hits maxlen it is left alone.
234 */
235 STATIC void
236 xfs_alloc_fix_len(
237 xfs_alloc_arg_t *args) /* allocation argument structure */
238 {
239 xfs_extlen_t k;
240 xfs_extlen_t rlen;
241
242 ASSERT(args->mod < args->prod);
243 rlen = args->len;
244 ASSERT(rlen >= args->minlen);
245 ASSERT(rlen <= args->maxlen);
246 if (args->prod <= 1 || rlen < args->mod || rlen == args->maxlen ||
247 (args->mod == 0 && rlen < args->prod))
248 return;
249 k = rlen % args->prod;
250 if (k == args->mod)
251 return;
252 if (k > args->mod) {
253 if ((int)(rlen = rlen - k - args->mod) < (int)args->minlen)
254 return;
255 } else {
256 if ((int)(rlen = rlen - args->prod - (args->mod - k)) <
257 (int)args->minlen)
258 return;
259 }
260 ASSERT(rlen >= args->minlen);
261 ASSERT(rlen <= args->maxlen);
262 args->len = rlen;
263 }
264
265 /*
266 * Fix up length if there is too little space left in the a.g.
267 * Return 1 if ok, 0 if too little, should give up.
268 */
269 STATIC int
270 xfs_alloc_fix_minleft(
271 xfs_alloc_arg_t *args) /* allocation argument structure */
272 {
273 xfs_agf_t *agf; /* a.g. freelist header */
274 int diff; /* free space difference */
275
276 if (args->minleft == 0)
277 return 1;
278 agf = XFS_BUF_TO_AGF(args->agbp);
279 diff = be32_to_cpu(agf->agf_freeblks)
280 - args->len - args->minleft;
281 if (diff >= 0)
282 return 1;
283 args->len += diff; /* shrink the allocated space */
284 if (args->len >= args->minlen)
285 return 1;
286 args->agbno = NULLAGBLOCK;
287 return 0;
288 }
289
290 /*
291 * Update the two btrees, logically removing from freespace the extent
292 * starting at rbno, rlen blocks. The extent is contained within the
293 * actual (current) free extent fbno for flen blocks.
294 * Flags are passed in indicating whether the cursors are set to the
295 * relevant records.
296 */
297 STATIC int /* error code */
298 xfs_alloc_fixup_trees(
299 xfs_btree_cur_t *cnt_cur, /* cursor for by-size btree */
300 xfs_btree_cur_t *bno_cur, /* cursor for by-block btree */
301 xfs_agblock_t fbno, /* starting block of free extent */
302 xfs_extlen_t flen, /* length of free extent */
303 xfs_agblock_t rbno, /* starting block of returned extent */
304 xfs_extlen_t rlen, /* length of returned extent */
305 int flags) /* flags, XFSA_FIXUP_... */
306 {
307 int error; /* error code */
308 int i; /* operation results */
309 xfs_agblock_t nfbno1; /* first new free startblock */
310 xfs_agblock_t nfbno2; /* second new free startblock */
311 xfs_extlen_t nflen1=0; /* first new free length */
312 xfs_extlen_t nflen2=0; /* second new free length */
313
314 /*
315 * Look up the record in the by-size tree if necessary.
316 */
317 if (flags & XFSA_FIXUP_CNT_OK) {
318 #ifdef DEBUG
319 if ((error = xfs_alloc_get_rec(cnt_cur, &nfbno1, &nflen1, &i)))
320 return error;
321 XFS_WANT_CORRUPTED_RETURN(
322 i == 1 && nfbno1 == fbno && nflen1 == flen);
323 #endif
324 } else {
325 if ((error = xfs_alloc_lookup_eq(cnt_cur, fbno, flen, &i)))
326 return error;
327 XFS_WANT_CORRUPTED_RETURN(i == 1);
328 }
329 /*
330 * Look up the record in the by-block tree if necessary.
331 */
332 if (flags & XFSA_FIXUP_BNO_OK) {
333 #ifdef DEBUG
334 if ((error = xfs_alloc_get_rec(bno_cur, &nfbno1, &nflen1, &i)))
335 return error;
336 XFS_WANT_CORRUPTED_RETURN(
337 i == 1 && nfbno1 == fbno && nflen1 == flen);
338 #endif
339 } else {
340 if ((error = xfs_alloc_lookup_eq(bno_cur, fbno, flen, &i)))
341 return error;
342 XFS_WANT_CORRUPTED_RETURN(i == 1);
343 }
344
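/*
 * If both btrees consist of a single (root) leaf block they describe
 * the same set of free extents, one record each, so their record
 * counts must agree.
 */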
345 #ifdef DEBUG
346 if (bno_cur->bc_nlevels == 1 && cnt_cur->bc_nlevels == 1) {
347 struct xfs_btree_block *bnoblock;
348 struct xfs_btree_block *cntblock;
349
350 bnoblock = XFS_BUF_TO_BLOCK(bno_cur->bc_bufs[0]);
351 cntblock = XFS_BUF_TO_BLOCK(cnt_cur->bc_bufs[0]);
352
353 XFS_WANT_CORRUPTED_RETURN(
354 bnoblock->bb_numrecs == cntblock->bb_numrecs);
355 }
356 #endif
357
358 /*
359 * Deal with all four cases: the allocated record is contained
360 * within the freespace record, so we can have new freespace
361 * at either (or both) end, or no freespace remaining.
362 */
363 if (rbno == fbno && rlen == flen)
364 nfbno1 = nfbno2 = NULLAGBLOCK;
365 else if (rbno == fbno) {
366 nfbno1 = rbno + rlen;
367 nflen1 = flen - rlen;
368 nfbno2 = NULLAGBLOCK;
369 } else if (rbno + rlen == fbno + flen) {
370 nfbno1 = fbno;
371 nflen1 = flen - rlen;
372 nfbno2 = NULLAGBLOCK;
373 } else {
374 nfbno1 = fbno;
375 nflen1 = rbno - fbno;
376 nfbno2 = rbno + rlen;
377 nflen2 = (fbno + flen) - nfbno2;
378 }
379 /*
380 * Delete the entry from the by-size btree.
381 */
382 if ((error = xfs_btree_delete(cnt_cur, &i)))
383 return error;
384 XFS_WANT_CORRUPTED_RETURN(i == 1);
385 /*
386 * Add new by-size btree entry(s).
387 */
388 if (nfbno1 != NULLAGBLOCK) {
389 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno1, nflen1, &i)))
390 return error;
391 XFS_WANT_CORRUPTED_RETURN(i == 0);
392 if ((error = xfs_btree_insert(cnt_cur, &i)))
393 return error;
394 XFS_WANT_CORRUPTED_RETURN(i == 1);
395 }
396 if (nfbno2 != NULLAGBLOCK) {
397 if ((error = xfs_alloc_lookup_eq(cnt_cur, nfbno2, nflen2, &i)))
398 return error;
399 XFS_WANT_CORRUPTED_RETURN(i == 0);
400 if ((error = xfs_btree_insert(cnt_cur, &i)))
401 return error;
402 XFS_WANT_CORRUPTED_RETURN(i == 1);
403 }
404 /*
405 * Fix up the by-block btree entry(s).
406 */
407 if (nfbno1 == NULLAGBLOCK) {
408 /*
409 * No remaining freespace, just delete the by-block tree entry.
410 */
411 if ((error = xfs_btree_delete(bno_cur, &i)))
412 return error;
413 XFS_WANT_CORRUPTED_RETURN(i == 1);
414 } else {
415 /*
416 * Update the by-block entry to start later or be shorter (or both).
417 */
418 if ((error = xfs_alloc_update(bno_cur, nfbno1, nflen1)))
419 return error;
420 }
421 if (nfbno2 != NULLAGBLOCK) {
422 /*
423 * 2 resulting free entries, need to add one.
424 */
425 if ((error = xfs_alloc_lookup_eq(bno_cur, nfbno2, nflen2, &i)))
426 return error;
427 XFS_WANT_CORRUPTED_RETURN(i == 0);
428 if ((error = xfs_btree_insert(bno_cur, &i)))
429 return error;
430 XFS_WANT_CORRUPTED_RETURN(i == 1);
431 }
432 return 0;
433 }
434
435 /*
436 * Read in the allocation group free block array.
437 */
438 STATIC int /* error */
439 xfs_alloc_read_agfl(
440 xfs_mount_t *mp, /* mount point structure */
441 xfs_trans_t *tp, /* transaction pointer */
442 xfs_agnumber_t agno, /* allocation group number */
443 xfs_buf_t **bpp) /* buffer for the ag free block array */
444 {
445 xfs_buf_t *bp; /* return value */
446 int error;
447
448 ASSERT(agno != NULLAGNUMBER);
449 error = xfs_trans_read_buf(
450 mp, tp, mp->m_ddev_targp,
451 XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
452 XFS_FSS_TO_BB(mp, 1), 0, &bp);
453 if (error)
454 return error;
455 ASSERT(!xfs_buf_geterror(bp));
456 xfs_buf_set_ref(bp, XFS_AGFL_REF);
457 *bpp = bp;
458 return 0;
459 }
460
461 STATIC int
462 xfs_alloc_update_counters(
463 struct xfs_trans *tp,
464 struct xfs_perag *pag,
465 struct xfs_buf *agbp,
466 long len)
467 {
468 struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
469
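/*
 * Apply the delta (negative for allocations) to both the in-core
 * per-ag count and the on-disk AGF count, and treat an AGF free
 * block count larger than the AG size as corruption.
 */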
470 pag->pagf_freeblks += len;
471 be32_add_cpu(&agf->agf_freeblks, len);
472
473 xfs_trans_agblocks_delta(tp, len);
474 if (unlikely(be32_to_cpu(agf->agf_freeblks) >
475 be32_to_cpu(agf->agf_length)))
476 return EFSCORRUPTED;
477
478 xfs_alloc_log_agf(tp, agbp, XFS_AGF_FREEBLKS);
479 return 0;
480 }
481
482 /*
483 * Allocation group level functions.
484 */
485
486 /*
487 * Allocate a variable extent in the allocation group agno.
488 * Type and bno are used to determine where in the allocation group the
489 * extent will start.
490 * Extent's length (returned in *len) will be between minlen and maxlen,
491 * and of the form k * prod + mod unless there's nothing that large.
492 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
493 */
494 STATIC int /* error */
495 xfs_alloc_ag_vextent(
496 xfs_alloc_arg_t *args) /* argument structure for allocation */
497 {
498 int error=0;
499
500 ASSERT(args->minlen > 0);
501 ASSERT(args->maxlen > 0);
502 ASSERT(args->minlen <= args->maxlen);
503 ASSERT(args->mod < args->prod);
504 ASSERT(args->alignment > 0);
505 /*
506 * Branch to correct routine based on the type.
507 */
508 args->wasfromfl = 0;
509 switch (args->type) {
510 case XFS_ALLOCTYPE_THIS_AG:
511 error = xfs_alloc_ag_vextent_size(args);
512 break;
513 case XFS_ALLOCTYPE_NEAR_BNO:
514 error = xfs_alloc_ag_vextent_near(args);
515 break;
516 case XFS_ALLOCTYPE_THIS_BNO:
517 error = xfs_alloc_ag_vextent_exact(args);
518 break;
519 default:
520 ASSERT(0);
521 /* NOTREACHED */
522 }
523
524 if (error || args->agbno == NULLAGBLOCK)
525 return error;
526
527 ASSERT(args->len >= args->minlen);
528 ASSERT(args->len <= args->maxlen);
529 ASSERT(!args->wasfromfl || !args->isfl);
530 ASSERT(args->agbno % args->alignment == 0);
531
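/*
 * The extent came out of the free space btrees (not the AGFL), so
 * take it out of the AGF/per-ag free block counts and make sure we
 * are not handing out space that is still marked busy.
 */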
532 if (!args->wasfromfl) {
533 error = xfs_alloc_update_counters(args->tp, args->pag,
534 args->agbp,
535 -((long)(args->len)));
536 if (error)
537 return error;
538
539 ASSERT(!xfs_alloc_busy_search(args->mp, args->agno,
540 args->agbno, args->len));
541 }
542
543 if (!args->isfl) {
544 xfs_trans_mod_sb(args->tp, args->wasdel ?
545 XFS_TRANS_SB_RES_FDBLOCKS :
546 XFS_TRANS_SB_FDBLOCKS,
547 -((long)(args->len)));
548 }
549
550 XFS_STATS_INC(xs_allocx);
551 XFS_STATS_ADD(xs_allocb, args->len);
552 return error;
553 }
554
555 /*
556 * Allocate a variable extent at exactly agno/bno.
557 * Extent's length (returned in *len) will be between minlen and maxlen,
558 * and of the form k * prod + mod unless there's nothing that large.
559 * Return the starting a.g. block (bno), or NULLAGBLOCK if we can't do it.
560 */
561 STATIC int /* error */
562 xfs_alloc_ag_vextent_exact(
563 xfs_alloc_arg_t *args) /* allocation argument structure */
564 {
565 xfs_btree_cur_t *bno_cur;/* by block-number btree cursor */
566 xfs_btree_cur_t *cnt_cur;/* by count btree cursor */
567 int error;
568 xfs_agblock_t fbno; /* start block of found extent */
569 xfs_extlen_t flen; /* length of found extent */
570 xfs_agblock_t tbno; /* start block of trimmed extent */
571 xfs_extlen_t tlen; /* length of trimmed extent */
572 xfs_agblock_t tend; /* end block of trimmed extent */
573 int i; /* success/failure of operation */
574
575 ASSERT(args->alignment == 1);
576
577 /*
578 * Allocate/initialize a cursor for the by-number freespace btree.
579 */
580 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
581 args->agno, XFS_BTNUM_BNO);
582
583 /*
584 * Lookup bno and minlen in the btree (minlen is irrelevant, really).
585 * Look for the closest free block <= bno, it must contain bno
586 * if any free block does.
587 */
588 error = xfs_alloc_lookup_le(bno_cur, args->agbno, args->minlen, &i);
589 if (error)
590 goto error0;
591 if (!i)
592 goto not_found;
593
594 /*
595 * Grab the freespace record.
596 */
597 error = xfs_alloc_get_rec(bno_cur, &fbno, &flen, &i);
598 if (error)
599 goto error0;
600 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
601 ASSERT(fbno <= args->agbno);
602
603 /*
604 * Check for overlapping busy extents.
605 */
606 xfs_alloc_busy_trim(args, fbno, flen, &tbno, &tlen);
607
608 /*
609 * Give up if the start of the extent is busy, or the freespace isn't
610 * long enough for the minimum request.
611 */
612 if (tbno > args->agbno)
613 goto not_found;
614 if (tlen < args->minlen)
615 goto not_found;
616 tend = tbno + tlen;
617 if (tend < args->agbno + args->minlen)
618 goto not_found;
619
620 /*
621 * End of extent will be the smaller of the freespace end and the
622 * maximal requested end.
623 *
624 * Fix the length according to mod and prod if given.
625 */
626 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
627 - args->agbno;
628 xfs_alloc_fix_len(args);
629 if (!xfs_alloc_fix_minleft(args))
630 goto not_found;
631
632 ASSERT(args->agbno + args->len <= tend);
633
634 /*
635 * We are allocating agbno for args->len
636 * Allocate/initialize a cursor for the by-size btree.
637 */
638 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
639 args->agno, XFS_BTNUM_CNT);
640 ASSERT(args->agbno + args->len <=
641 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
642 error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen, args->agbno,
643 args->len, XFSA_FIXUP_BNO_OK);
644 if (error) {
645 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
646 goto error0;
647 }
648
649 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
650 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
651
652 args->wasfromfl = 0;
653 trace_xfs_alloc_exact_done(args);
654 return 0;
655
656 not_found:
657 /* Didn't find it, return null. */
658 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
659 args->agbno = NULLAGBLOCK;
660 trace_xfs_alloc_exact_notfound(args);
661 return 0;
662
663 error0:
664 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
665 trace_xfs_alloc_exact_error(args);
666 return error;
667 }
668
669 /*
670 * Search the btree in a given direction via the search cursor and compare
671 * the records found against the good extent we've already found.
672 */
673 STATIC int
674 xfs_alloc_find_best_extent(
675 struct xfs_alloc_arg *args, /* allocation argument structure */
676 struct xfs_btree_cur **gcur, /* good cursor */
677 struct xfs_btree_cur **scur, /* searching cursor */
678 xfs_agblock_t gdiff, /* difference for search comparison */
679 xfs_agblock_t *sbno, /* extent found by search */
680 xfs_extlen_t *slen, /* extent length */
681 xfs_agblock_t *sbnoa, /* aligned extent found by search */
682 xfs_extlen_t *slena, /* aligned extent length */
683 int dir) /* 0 = search right, 1 = search left */
684 {
685 xfs_agblock_t new;
686 xfs_agblock_t sdiff;
687 int error;
688 int i;
689
690 /* The good extent is perfect, no need to search. */
691 if (!gdiff)
692 goto out_use_good;
693
694 /*
695 * Look until we find a better one, run out of space or run off the end.
696 */
697 do {
698 error = xfs_alloc_get_rec(*scur, sbno, slen, &i);
699 if (error)
700 goto error0;
701 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
702 xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
703
704 /*
705 * The good extent is closer than this one.
706 */
707 if (!dir) {
708 if (*sbnoa >= args->agbno + gdiff)
709 goto out_use_good;
710 } else {
711 if (*sbnoa <= args->agbno - gdiff)
712 goto out_use_good;
713 }
714
715 /*
716 * Same distance, compare length and pick the best.
717 */
718 if (*slena >= args->minlen) {
719 args->len = XFS_EXTLEN_MIN(*slena, args->maxlen);
720 xfs_alloc_fix_len(args);
721
722 sdiff = xfs_alloc_compute_diff(args->agbno, args->len,
723 args->alignment, *sbnoa,
724 *slena, &new);
725
726 /*
727 * Choose closer size and invalidate other cursor.
728 */
729 if (sdiff < gdiff)
730 goto out_use_search;
731 goto out_use_good;
732 }
733
734 if (!dir)
735 error = xfs_btree_increment(*scur, 0, &i);
736 else
737 error = xfs_btree_decrement(*scur, 0, &i);
738 if (error)
739 goto error0;
740 } while (i);
741
742 out_use_good:
743 xfs_btree_del_cursor(*scur, XFS_BTREE_NOERROR);
744 *scur = NULL;
745 return 0;
746
747 out_use_search:
748 xfs_btree_del_cursor(*gcur, XFS_BTREE_NOERROR);
749 *gcur = NULL;
750 return 0;
751
752 error0:
753 /* caller invalidates cursors */
754 return error;
755 }
756
757 /*
758 * Allocate a variable extent near bno in the allocation group agno.
759 * Extent's length (returned in len) will be between minlen and maxlen,
760 * and of the form k * prod + mod unless there's nothing that large.
761 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
762 */
763 STATIC int /* error */
764 xfs_alloc_ag_vextent_near(
765 xfs_alloc_arg_t *args) /* allocation argument structure */
766 {
767 xfs_btree_cur_t *bno_cur_gt; /* cursor for bno btree, right side */
768 xfs_btree_cur_t *bno_cur_lt; /* cursor for bno btree, left side */
769 xfs_btree_cur_t *cnt_cur; /* cursor for count btree */
770 xfs_agblock_t gtbno; /* start bno of right side entry */
771 xfs_agblock_t gtbnoa; /* aligned ... */
772 xfs_extlen_t gtdiff; /* difference to right side entry */
773 xfs_extlen_t gtlen; /* length of right side entry */
774 xfs_extlen_t gtlena; /* aligned ... */
775 xfs_agblock_t gtnew; /* useful start bno of right side */
776 int error; /* error code */
777 int i; /* result code, temporary */
778 int j; /* result code, temporary */
779 xfs_agblock_t ltbno; /* start bno of left side entry */
780 xfs_agblock_t ltbnoa; /* aligned ... */
781 xfs_extlen_t ltdiff; /* difference to left side entry */
782 xfs_extlen_t ltlen; /* length of left side entry */
783 xfs_extlen_t ltlena; /* aligned ... */
784 xfs_agblock_t ltnew; /* useful start bno of left side */
785 xfs_extlen_t rlen; /* length of returned extent */
786 int forced = 0;
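/* nonzero once we have forced the log to flush busy extents out of the way */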
787 #if defined(DEBUG) && defined(__KERNEL__)
788 /*
789 * Randomly don't execute the first algorithm.
790 */
791 int dofirst; /* set to do first algorithm */
792
793 dofirst = random32() & 1;
794 #endif
795
796 restart:
797 bno_cur_lt = NULL;
798 bno_cur_gt = NULL;
799 ltlen = 0;
800 gtlena = 0;
801 ltlena = 0;
802
803 /*
804 * Get a cursor for the by-size btree.
805 */
806 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
807 args->agno, XFS_BTNUM_CNT);
808
809 /*
810 * See if there are any free extents as big as maxlen.
811 */
812 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0, args->maxlen, &i)))
813 goto error0;
814 /*
815 * If none, then pick up the last entry in the tree unless the
816 * tree is empty.
817 */
818 if (!i) {
819 if ((error = xfs_alloc_ag_vextent_small(args, cnt_cur, &ltbno,
820 &ltlen, &i)))
821 goto error0;
822 if (i == 0 || ltlen == 0) {
823 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
824 trace_xfs_alloc_near_noentry(args);
825 return 0;
826 }
827 ASSERT(i == 1);
828 }
829 args->wasfromfl = 0;
830
831 /*
832 * First algorithm.
833 * If the requested extent is large wrt the freespaces available
834 * in this a.g., then the cursor will be pointing to a btree entry
835 * near the right edge of the tree. If it's in the last btree leaf
836 * block, then we just examine all the entries in that block
837 * that are big enough, and pick the best one.
838 * This is written as a while loop so we can break out of it,
839 * but we never loop back to the top.
840 */
841 while (xfs_btree_islastblock(cnt_cur, 0)) {
842 xfs_extlen_t bdiff;
843 int besti=0;
844 xfs_extlen_t blen=0;
845 xfs_agblock_t bnew=0;
846
847 #if defined(DEBUG) && defined(__KERNEL__)
848 if (!dofirst)
849 break;
850 #endif
851 /*
852 * Start from the entry that lookup found, sequence through
853 * all larger free blocks. If we're actually pointing at a
854 * record smaller than maxlen, go to the start of this block,
855 * and skip all those smaller than minlen.
856 */
857 if (ltlen || args->alignment > 1) {
858 cnt_cur->bc_ptrs[0] = 1;
859 do {
860 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno,
861 &ltlen, &i)))
862 goto error0;
863 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
864 if (ltlen >= args->minlen)
865 break;
866 if ((error = xfs_btree_increment(cnt_cur, 0, &i)))
867 goto error0;
868 } while (i);
869 ASSERT(ltlen >= args->minlen);
870 if (!i)
871 break;
872 }
873 i = cnt_cur->bc_ptrs[0];
874 for (j = 1, blen = 0, bdiff = 0;
875 !error && j && (blen < args->maxlen || bdiff > 0);
876 error = xfs_btree_increment(cnt_cur, 0, &j)) {
877 /*
878 * For each entry, decide if it's better than
879 * the previous best entry.
880 */
881 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
882 goto error0;
883 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
884 xfs_alloc_compute_aligned(args, ltbno, ltlen,
885 &ltbnoa, &ltlena);
886 if (ltlena < args->minlen)
887 continue;
888 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
889 xfs_alloc_fix_len(args);
890 ASSERT(args->len >= args->minlen);
891 if (args->len < blen)
892 continue;
893 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
894 args->alignment, ltbnoa, ltlena, &ltnew);
895 if (ltnew != NULLAGBLOCK &&
896 (args->len > blen || ltdiff < bdiff)) {
897 bdiff = ltdiff;
898 bnew = ltnew;
899 blen = args->len;
900 besti = cnt_cur->bc_ptrs[0];
901 }
902 }
903 /*
904 * It didn't work. We COULD be in a case where
905 * there's a good record somewhere, so try again.
906 */
907 if (blen == 0)
908 break;
909 /*
910 * Point at the best entry, and retrieve it again.
911 */
912 cnt_cur->bc_ptrs[0] = besti;
913 if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
914 goto error0;
915 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
916 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
917 args->len = blen;
918 if (!xfs_alloc_fix_minleft(args)) {
919 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
920 trace_xfs_alloc_near_nominleft(args);
921 return 0;
922 }
923 blen = args->len;
924 /*
925 * We are allocating starting at bnew for blen blocks.
926 */
927 args->agbno = bnew;
928 ASSERT(bnew >= ltbno);
929 ASSERT(bnew + blen <= ltbno + ltlen);
930 /*
931 * Set up a cursor for the by-bno tree.
932 */
933 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp,
934 args->agbp, args->agno, XFS_BTNUM_BNO);
935 /*
936 * Fix up the btree entries.
937 */
938 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno,
939 ltlen, bnew, blen, XFSA_FIXUP_CNT_OK)))
940 goto error0;
941 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
942 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
943
944 trace_xfs_alloc_near_first(args);
945 return 0;
946 }
947 /*
948 * Second algorithm.
949 * Search in the by-bno tree to the left and to the right
950 * simultaneously, until in each case we find a space big enough,
951 * or run into the edge of the tree. When we run into the edge,
952 * we deallocate that cursor.
953 * If both searches succeed, we compare the two spaces and pick
954 * the better one.
955 * With alignment, it's possible for both to fail; the upper
956 * level algorithm that picks allocation groups for allocations
957 * is not supposed to do this.
958 */
959 /*
960 * Allocate and initialize the cursor for the leftward search.
961 */
962 bno_cur_lt = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
963 args->agno, XFS_BTNUM_BNO);
964 /*
965 * Lookup <= bno to find the leftward search's starting point.
966 */
967 if ((error = xfs_alloc_lookup_le(bno_cur_lt, args->agbno, args->maxlen, &i)))
968 goto error0;
969 if (!i) {
970 /*
971 * Didn't find anything; use this cursor for the rightward
972 * search.
973 */
974 bno_cur_gt = bno_cur_lt;
975 bno_cur_lt = NULL;
976 }
977 /*
978 * Found something. Duplicate the cursor for the rightward search.
979 */
980 else if ((error = xfs_btree_dup_cursor(bno_cur_lt, &bno_cur_gt)))
981 goto error0;
982 /*
983 * Increment the cursor, so we will point at the entry just right
984 * of the leftward entry if any, or to the leftmost entry.
985 */
986 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
987 goto error0;
988 if (!i) {
989 /*
990 * It failed, there are no rightward entries.
991 */
992 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_NOERROR);
993 bno_cur_gt = NULL;
994 }
995 /*
996 * Loop going left with the leftward cursor, right with the
997 * rightward cursor, until either both directions give up or
998 * we find an entry at least as big as minlen.
999 */
1000 do {
1001 if (bno_cur_lt) {
1002 if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
1003 goto error0;
1004 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1005 xfs_alloc_compute_aligned(args, ltbno, ltlen,
1006 &ltbnoa, &ltlena);
1007 if (ltlena >= args->minlen)
1008 break;
1009 if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
1010 goto error0;
1011 if (!i) {
1012 xfs_btree_del_cursor(bno_cur_lt,
1013 XFS_BTREE_NOERROR);
1014 bno_cur_lt = NULL;
1015 }
1016 }
1017 if (bno_cur_gt) {
1018 if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
1019 goto error0;
1020 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1021 xfs_alloc_compute_aligned(args, gtbno, gtlen,
1022 &gtbnoa, &gtlena);
1023 if (gtlena >= args->minlen)
1024 break;
1025 if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
1026 goto error0;
1027 if (!i) {
1028 xfs_btree_del_cursor(bno_cur_gt,
1029 XFS_BTREE_NOERROR);
1030 bno_cur_gt = NULL;
1031 }
1032 }
1033 } while (bno_cur_lt || bno_cur_gt);
1034
1035 /*
1036 * Got both cursors still active, need to find better entry.
1037 */
1038 if (bno_cur_lt && bno_cur_gt) {
1039 if (ltlena >= args->minlen) {
1040 /*
1041 * Left side is good, look for a right side entry.
1042 */
1043 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1044 xfs_alloc_fix_len(args);
1045 ltdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1046 args->alignment, ltbnoa, ltlena, &ltnew);
1047
1048 error = xfs_alloc_find_best_extent(args,
1049 &bno_cur_lt, &bno_cur_gt,
1050 ltdiff, &gtbno, &gtlen,
1051 &gtbnoa, &gtlena,
1052 0 /* search right */);
1053 } else {
1054 ASSERT(gtlena >= args->minlen);
1055
1056 /*
1057 * Right side is good, look for a left side entry.
1058 */
1059 args->len = XFS_EXTLEN_MIN(gtlena, args->maxlen);
1060 xfs_alloc_fix_len(args);
1061 gtdiff = xfs_alloc_compute_diff(args->agbno, args->len,
1062 args->alignment, gtbnoa, gtlena, &gtnew);
1063
1064 error = xfs_alloc_find_best_extent(args,
1065 &bno_cur_gt, &bno_cur_lt,
1066 gtdiff, &ltbno, &ltlen,
1067 &ltbnoa, &ltlena,
1068 1 /* search left */);
1069 }
1070
1071 if (error)
1072 goto error0;
1073 }
1074
1075 /*
1076 * If we couldn't get anything, give up.
1077 */
1078 if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
1079 if (!forced++) {
1080 trace_xfs_alloc_near_busy(args);
1081 xfs_log_force(args->mp, XFS_LOG_SYNC);
1082 goto restart;
1083 }
1084
1085 trace_xfs_alloc_size_neither(args);
1086 args->agbno = NULLAGBLOCK;
1087 return 0;
1088 }
1089
1090 /*
1091 * At this point we have selected a freespace entry, either to the
1092 * left or to the right. If it's on the right, copy all the
1093 * useful variables to the "left" set so we only have one
1094 * copy of this code.
1095 */
1096 if (bno_cur_gt) {
1097 bno_cur_lt = bno_cur_gt;
1098 bno_cur_gt = NULL;
1099 ltbno = gtbno;
1100 ltbnoa = gtbnoa;
1101 ltlen = gtlen;
1102 ltlena = gtlena;
1103 j = 1;
1104 } else
1105 j = 0;
1106
1107 /*
1108 * Fix up the length and compute the useful address.
1109 */
1110 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
1111 xfs_alloc_fix_len(args);
1112 if (!xfs_alloc_fix_minleft(args)) {
1113 trace_xfs_alloc_near_nominleft(args);
1114 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1115 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1116 return 0;
1117 }
1118 rlen = args->len;
1119 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
1120 ltbnoa, ltlena, &ltnew);
1121 ASSERT(ltnew >= ltbno);
1122 ASSERT(ltnew + rlen <= ltbnoa + ltlena);
1123 ASSERT(ltnew + rlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
1124 args->agbno = ltnew;
1125
1126 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur_lt, ltbno, ltlen,
1127 ltnew, rlen, XFSA_FIXUP_BNO_OK)))
1128 goto error0;
1129
1130 if (j)
1131 trace_xfs_alloc_near_greater(args);
1132 else
1133 trace_xfs_alloc_near_lesser(args);
1134
1135 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1136 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
1137 return 0;
1138
1139 error0:
1140 trace_xfs_alloc_near_error(args);
1141 if (cnt_cur != NULL)
1142 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1143 if (bno_cur_lt != NULL)
1144 xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_ERROR);
1145 if (bno_cur_gt != NULL)
1146 xfs_btree_del_cursor(bno_cur_gt, XFS_BTREE_ERROR);
1147 return error;
1148 }
1149
1150 /*
1151 * Allocate a variable extent anywhere in the allocation group agno.
1152 * Extent's length (returned in len) will be between minlen and maxlen,
1153 * and of the form k * prod + mod unless there's nothing that large.
1154 * Return the starting a.g. block, or NULLAGBLOCK if we can't do it.
1155 */
1156 STATIC int /* error */
1157 xfs_alloc_ag_vextent_size(
1158 xfs_alloc_arg_t *args) /* allocation argument structure */
1159 {
1160 xfs_btree_cur_t *bno_cur; /* cursor for bno btree */
1161 xfs_btree_cur_t *cnt_cur; /* cursor for cnt btree */
1162 int error; /* error result */
1163 xfs_agblock_t fbno; /* start of found freespace */
1164 xfs_extlen_t flen; /* length of found freespace */
1165 int i; /* temp status variable */
1166 xfs_agblock_t rbno; /* returned block number */
1167 xfs_extlen_t rlen; /* length of returned extent */
1168 int forced = 0;
1169
1170 restart:
1171 /*
1172 * Allocate and initialize a cursor for the by-size btree.
1173 */
1174 cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1175 args->agno, XFS_BTNUM_CNT);
1176 bno_cur = NULL;
1177
1178 /*
1179 * Look for an entry >= maxlen+alignment-1 blocks.
1180 */
1181 if ((error = xfs_alloc_lookup_ge(cnt_cur, 0,
1182 args->maxlen + args->alignment - 1, &i)))
1183 goto error0;
1184
1185 /*
1186 * If none or we have busy extents that we cannot allocate from, then
1187 * we have to settle for a smaller extent. In the case that there are
1188 * no large extents, this will return the last entry in the tree unless
1189 * the tree is empty. In the case that there are only busy large
1190 * extents, this will return the largest small extent unless there
1191 * are no smaller extents available.
1192 */
1193 if (!i || forced > 1) {
1194 error = xfs_alloc_ag_vextent_small(args, cnt_cur,
1195 &fbno, &flen, &i);
1196 if (error)
1197 goto error0;
1198 if (i == 0 || flen == 0) {
1199 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1200 trace_xfs_alloc_size_noentry(args);
1201 return 0;
1202 }
1203 ASSERT(i == 1);
1204 xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
1205 } else {
1206 /*
1207 * Search for a non-busy extent that is large enough.
1208 * If we are at low space, don't check, or if we fall off
1209 * the end of the btree, turn off the busy check and
1210 * restart.
1211 */
1212 for (;;) {
1213 error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
1214 if (error)
1215 goto error0;
1216 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1217
1218 xfs_alloc_compute_aligned(args, fbno, flen,
1219 &rbno, &rlen);
1220
1221 if (rlen >= args->maxlen)
1222 break;
1223
1224 error = xfs_btree_increment(cnt_cur, 0, &i);
1225 if (error)
1226 goto error0;
1227 if (i == 0) {
1228 /*
1229 * Our only valid extents must have been busy.
1230 * Make it unbusy by forcing the log out and
1231 * retrying. If we've been here before, forcing
1232 * the log isn't making the extents available,
1233 * which means they have probably been freed in
1234 * this transaction. In that case, we have to
1235 * give up on them and we'll attempt a minlen
1236 * allocation the next time around.
1237 */
1238 xfs_btree_del_cursor(cnt_cur,
1239 XFS_BTREE_NOERROR);
1240 trace_xfs_alloc_size_busy(args);
1241 if (!forced++)
1242 xfs_log_force(args->mp, XFS_LOG_SYNC);
1243 goto restart;
1244 }
1245 }
1246 }
1247
1248 /*
1249 * In the first case above, we got the last entry in the
1250 * by-size btree. Now we check to see if the space hits maxlen
1251 * once aligned; if not, we search left for something better.
1252 * This can't happen in the second case above.
1253 */
1254 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1255 XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
1256 (rlen <= flen && rbno + rlen <= fbno + flen), error0);
1257 if (rlen < args->maxlen) {
1258 xfs_agblock_t bestfbno;
1259 xfs_extlen_t bestflen;
1260 xfs_agblock_t bestrbno;
1261 xfs_extlen_t bestrlen;
1262
1263 bestrlen = rlen;
1264 bestrbno = rbno;
1265 bestflen = flen;
1266 bestfbno = fbno;
1267 for (;;) {
1268 if ((error = xfs_btree_decrement(cnt_cur, 0, &i)))
1269 goto error0;
1270 if (i == 0)
1271 break;
1272 if ((error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen,
1273 &i)))
1274 goto error0;
1275 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1276 if (flen < bestrlen)
1277 break;
1278 xfs_alloc_compute_aligned(args, fbno, flen,
1279 &rbno, &rlen);
1280 rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
1281 XFS_WANT_CORRUPTED_GOTO(rlen == 0 ||
1282 (rlen <= flen && rbno + rlen <= fbno + flen),
1283 error0);
1284 if (rlen > bestrlen) {
1285 bestrlen = rlen;
1286 bestrbno = rbno;
1287 bestflen = flen;
1288 bestfbno = fbno;
1289 if (rlen == args->maxlen)
1290 break;
1291 }
1292 }
1293 if ((error = xfs_alloc_lookup_eq(cnt_cur, bestfbno, bestflen,
1294 &i)))
1295 goto error0;
1296 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1297 rlen = bestrlen;
1298 rbno = bestrbno;
1299 flen = bestflen;
1300 fbno = bestfbno;
1301 }
1302 args->wasfromfl = 0;
1303 /*
1304 * Fix up the length.
1305 */
1306 args->len = rlen;
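/*
 * If the aligned, busy-trimmed extent is still shorter than minlen,
 * the space may simply be busy: force the log once to retire busy
 * extents and retry before giving up.
 */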
1307 if (rlen < args->minlen) {
1308 if (!forced++) {
1309 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1310 trace_xfs_alloc_size_busy(args);
1311 xfs_log_force(args->mp, XFS_LOG_SYNC);
1312 goto restart;
1313 }
1314 goto out_nominleft;
1315 }
1316 xfs_alloc_fix_len(args);
1317
1318 if (!xfs_alloc_fix_minleft(args))
1319 goto out_nominleft;
1320 rlen = args->len;
1321 XFS_WANT_CORRUPTED_GOTO(rlen <= flen, error0);
1322 /*
1323 * Allocate and initialize a cursor for the by-block tree.
1324 */
1325 bno_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
1326 args->agno, XFS_BTNUM_BNO);
1327 if ((error = xfs_alloc_fixup_trees(cnt_cur, bno_cur, fbno, flen,
1328 rbno, rlen, XFSA_FIXUP_CNT_OK)))
1329 goto error0;
1330 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1331 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1332 cnt_cur = bno_cur = NULL;
1333 args->len = rlen;
1334 args->agbno = rbno;
1335 XFS_WANT_CORRUPTED_GOTO(
1336 args->agbno + args->len <=
1337 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1338 error0);
1339 trace_xfs_alloc_size_done(args);
1340 return 0;
1341
1342 error0:
1343 trace_xfs_alloc_size_error(args);
1344 if (cnt_cur)
1345 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1346 if (bno_cur)
1347 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1348 return error;
1349
1350 out_nominleft:
1351 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1352 trace_xfs_alloc_size_nominleft(args);
1353 args->agbno = NULLAGBLOCK;
1354 return 0;
1355 }
1356
1357 /*
1358 * Deal with the case where only small freespaces remain.
1359 * Either return the contents of the last freespace record,
1360 * or allocate space from the freelist if there is nothing in the tree.
1361 */
1362 STATIC int /* error */
1363 xfs_alloc_ag_vextent_small(
1364 xfs_alloc_arg_t *args, /* allocation argument structure */
1365 xfs_btree_cur_t *ccur, /* by-size cursor */
1366 xfs_agblock_t *fbnop, /* result block number */
1367 xfs_extlen_t *flenp, /* result length */
1368 int *stat) /* status: 0-freelist, 1-normal/none */
1369 {
1370 int error;
1371 xfs_agblock_t fbno;
1372 xfs_extlen_t flen;
1373 int i;
1374
1375 if ((error = xfs_btree_decrement(ccur, 0, &i)))
1376 goto error0;
1377 if (i) {
1378 if ((error = xfs_alloc_get_rec(ccur, &fbno, &flen, &i)))
1379 goto error0;
1380 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1381 }
1382 /*
1383 * Nothing in the btree, try the freelist. Make sure
1384 * to respect minleft even when pulling from the
1385 * freelist.
1386 */
1387 else if (args->minlen == 1 && args->alignment == 1 && !args->isfl &&
1388 (be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_flcount)
1389 > args->minleft)) {
1390 error = xfs_alloc_get_freelist(args->tp, args->agbp, &fbno, 0);
1391 if (error)
1392 goto error0;
1393 if (fbno != NULLAGBLOCK) {
1394 xfs_alloc_busy_reuse(args->mp, args->agno, fbno, 1,
1395 args->userdata);
1396
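/*
 * When the freelist block is going to hold user data, grab and
 * invalidate the buffer covering it so any stale metadata in that
 * block is never written back over the new user data.
 */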
1397 if (args->userdata) {
1398 xfs_buf_t *bp;
1399
1400 bp = xfs_btree_get_bufs(args->mp, args->tp,
1401 args->agno, fbno, 0);
1402 xfs_trans_binval(args->tp, bp);
1403 }
1404 args->len = 1;
1405 args->agbno = fbno;
1406 XFS_WANT_CORRUPTED_GOTO(
1407 args->agbno + args->len <=
1408 be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length),
1409 error0);
1410 args->wasfromfl = 1;
1411 trace_xfs_alloc_small_freelist(args);
1412 *stat = 0;
1413 return 0;
1414 }
1415 /*
1416 * Nothing in the freelist.
1417 */
1418 else
1419 flen = 0;
1420 }
1421 /*
1422 * Can't allocate from the freelist for some reason.
1423 */
1424 else {
1425 fbno = NULLAGBLOCK;
1426 flen = 0;
1427 }
1428 /*
1429 * Can't do the allocation, give up.
1430 */
1431 if (flen < args->minlen) {
1432 args->agbno = NULLAGBLOCK;
1433 trace_xfs_alloc_small_notenough(args);
1434 flen = 0;
1435 }
1436 *fbnop = fbno;
1437 *flenp = flen;
1438 *stat = 1;
1439 trace_xfs_alloc_small_done(args);
1440 return 0;
1441
1442 error0:
1443 trace_xfs_alloc_small_error(args);
1444 return error;
1445 }
1446
1447 /*
1448 * Free the extent starting at agno/bno for length.
1449 */
1450 STATIC int /* error */
1451 xfs_free_ag_extent(
1452 xfs_trans_t *tp, /* transaction pointer */
1453 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
1454 xfs_agnumber_t agno, /* allocation group number */
1455 xfs_agblock_t bno, /* starting block number */
1456 xfs_extlen_t len, /* length of extent */
1457 int isfl) /* set if is freelist blocks - no sb acctg */
1458 {
1459 xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
1460 xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
1461 int error; /* error return value */
1462 xfs_agblock_t gtbno; /* start of right neighbor block */
1463 xfs_extlen_t gtlen; /* length of right neighbor block */
1464 int haveleft; /* have a left neighbor block */
1465 int haveright; /* have a right neighbor block */
1466 int i; /* temp, result code */
1467 xfs_agblock_t ltbno; /* start of left neighbor block */
1468 xfs_extlen_t ltlen; /* length of left neighbor block */
1469 xfs_mount_t *mp; /* mount point struct for filesystem */
1470 xfs_agblock_t nbno; /* new starting block of freespace */
1471 xfs_extlen_t nlen; /* new length of freespace */
1472 xfs_perag_t *pag; /* per allocation group data */
1473
1474 mp = tp->t_mountp;
1475 /*
1476 * Allocate and initialize a cursor for the by-block btree.
1477 */
1478 bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
1479 cnt_cur = NULL;
1480 /*
1481 * Look for a neighboring block on the left (lower block numbers)
1482 * that is contiguous with this space.
1483 */
1484 if ((error = xfs_alloc_lookup_le(bno_cur, bno, len, &haveleft)))
1485 goto error0;
1486 if (haveleft) {
1487 /*
1488 * There is a block to our left.
1489 */
1490 if ((error = xfs_alloc_get_rec(bno_cur, &ltbno, &ltlen, &i)))
1491 goto error0;
1492 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1493 /*
1494 * It's not contiguous, though.
1495 */
1496 if (ltbno + ltlen < bno)
1497 haveleft = 0;
1498 else {
1499 /*
1500 * If this failure happens the request to free this
1501 * space was invalid, it's (partly) already free.
1502 * Very bad.
1503 */
1504 XFS_WANT_CORRUPTED_GOTO(ltbno + ltlen <= bno, error0);
1505 }
1506 }
1507 /*
1508 * Look for a neighboring block on the right (higher block numbers)
1509 * that is contiguous with this space.
1510 */
1511 if ((error = xfs_btree_increment(bno_cur, 0, &haveright)))
1512 goto error0;
1513 if (haveright) {
1514 /*
1515 * There is a block to our right.
1516 */
1517 if ((error = xfs_alloc_get_rec(bno_cur, &gtbno, &gtlen, &i)))
1518 goto error0;
1519 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1520 /*
1521 * It's not contiguous, though.
1522 */
1523 if (bno + len < gtbno)
1524 haveright = 0;
1525 else {
1526 /*
1527 * If this failure happens the request to free this
1528 * space was invalid, it's (partly) already free.
1529 * Very bad.
1530 */
1531 XFS_WANT_CORRUPTED_GOTO(gtbno >= bno + len, error0);
1532 }
1533 }
1534 /*
1535 * Now allocate and initialize a cursor for the by-size tree.
1536 */
1537 cnt_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_CNT);
1538 /*
1539 * Have both left and right contiguous neighbors.
1540 * Merge all three into a single free block.
1541 */
1542 if (haveleft && haveright) {
1543 /*
1544 * Delete the old by-size entry on the left.
1545 */
1546 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1547 goto error0;
1548 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1549 if ((error = xfs_btree_delete(cnt_cur, &i)))
1550 goto error0;
1551 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1552 /*
1553 * Delete the old by-size entry on the right.
1554 */
1555 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1556 goto error0;
1557 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1558 if ((error = xfs_btree_delete(cnt_cur, &i)))
1559 goto error0;
1560 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1561 /*
1562 * Delete the old by-block entry for the right block.
1563 */
1564 if ((error = xfs_btree_delete(bno_cur, &i)))
1565 goto error0;
1566 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1567 /*
1568 * Move the by-block cursor back to the left neighbor.
1569 */
1570 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1571 goto error0;
1572 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1573 #ifdef DEBUG
1574 /*
1575 * Check that this is the right record: delete didn't
1576 * mangle the cursor.
1577 */
1578 {
1579 xfs_agblock_t xxbno;
1580 xfs_extlen_t xxlen;
1581
1582 if ((error = xfs_alloc_get_rec(bno_cur, &xxbno, &xxlen,
1583 &i)))
1584 goto error0;
1585 XFS_WANT_CORRUPTED_GOTO(
1586 i == 1 && xxbno == ltbno && xxlen == ltlen,
1587 error0);
1588 }
1589 #endif
1590 /*
1591 * Update remaining by-block entry to the new, joined block.
1592 */
1593 nbno = ltbno;
1594 nlen = len + ltlen + gtlen;
1595 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1596 goto error0;
1597 }
1598 /*
1599 * Have only a left contiguous neighbor.
1600 * Merge it together with the new freespace.
1601 */
1602 else if (haveleft) {
1603 /*
1604 * Delete the old by-size entry on the left.
1605 */
1606 if ((error = xfs_alloc_lookup_eq(cnt_cur, ltbno, ltlen, &i)))
1607 goto error0;
1608 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1609 if ((error = xfs_btree_delete(cnt_cur, &i)))
1610 goto error0;
1611 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1612 /*
1613 * Back up the by-block cursor to the left neighbor, and
1614 * update its length.
1615 */
1616 if ((error = xfs_btree_decrement(bno_cur, 0, &i)))
1617 goto error0;
1618 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1619 nbno = ltbno;
1620 nlen = len + ltlen;
1621 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1622 goto error0;
1623 }
1624 /*
1625 * Have only a right contiguous neighbor.
1626 * Merge it together with the new freespace.
1627 */
1628 else if (haveright) {
1629 /*
1630 * Delete the old by-size entry on the right.
1631 */
1632 if ((error = xfs_alloc_lookup_eq(cnt_cur, gtbno, gtlen, &i)))
1633 goto error0;
1634 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1635 if ((error = xfs_btree_delete(cnt_cur, &i)))
1636 goto error0;
1637 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1638 /*
1639 * Update the starting block and length of the right
1640 * neighbor in the by-block tree.
1641 */
1642 nbno = bno;
1643 nlen = len + gtlen;
1644 if ((error = xfs_alloc_update(bno_cur, nbno, nlen)))
1645 goto error0;
1646 }
1647 /*
1648 * No contiguous neighbors.
1649 * Insert the new freespace into the by-block tree.
1650 */
1651 else {
1652 nbno = bno;
1653 nlen = len;
1654 if ((error = xfs_btree_insert(bno_cur, &i)))
1655 goto error0;
1656 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1657 }
1658 xfs_btree_del_cursor(bno_cur, XFS_BTREE_NOERROR);
1659 bno_cur = NULL;
1660 /*
1661 * In all cases we need to insert the new freespace in the by-size tree.
1662 */
1663 if ((error = xfs_alloc_lookup_eq(cnt_cur, nbno, nlen, &i)))
1664 goto error0;
1665 XFS_WANT_CORRUPTED_GOTO(i == 0, error0);
1666 if ((error = xfs_btree_insert(cnt_cur, &i)))
1667 goto error0;
1668 XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
1669 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1670 cnt_cur = NULL;
1671
1672 /*
1673 * Update the freespace totals in the ag and superblock.
1674 */
1675 pag = xfs_perag_get(mp, agno);
1676 error = xfs_alloc_update_counters(tp, pag, agbp, len);
1677 xfs_perag_put(pag);
1678 if (error)
1679 goto error0;
1680
1681 if (!isfl)
1682 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, (long)len);
1683 XFS_STATS_INC(xs_freex);
1684 XFS_STATS_ADD(xs_freeb, len);
1685
1686 trace_xfs_free_extent(mp, agno, bno, len, isfl, haveleft, haveright);
1687
1688 return 0;
1689
1690 error0:
1691 trace_xfs_free_extent(mp, agno, bno, len, isfl, -1, -1);
1692 if (bno_cur)
1693 xfs_btree_del_cursor(bno_cur, XFS_BTREE_ERROR);
1694 if (cnt_cur)
1695 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_ERROR);
1696 return error;
1697 }
1698
1699 /*
1700 * Visible (exported) allocation/free functions.
1701 * Some of these are used just by xfs_alloc_btree.c and this file.
1702 */
1703
1704 /*
1705 * Compute and fill in value of m_ag_maxlevels.
1706 */
1707 void
1708 xfs_alloc_compute_maxlevels(
1709 xfs_mount_t *mp) /* file system mount structure */
1710 {
1711 int level;
1712 uint maxblocks;
1713 uint maxleafents;
1714 int minleafrecs;
1715 int minnoderecs;
1716
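/*
 * Worst case is every other block in the AG being a separate free
 * extent, so a leaf level can never hold more than
 * (sb_agblocks + 1) / 2 records.  Dividing by the minimum records
 * per block at each level gives the maximum possible tree height.
 */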
1717 maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
1718 minleafrecs = mp->m_alloc_mnr[0];
1719 minnoderecs = mp->m_alloc_mnr[1];
1720 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
1721 for (level = 1; maxblocks > 1; level++)
1722 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
1723 mp->m_ag_maxlevels = level;
1724 }
1725
1726 /*
1727 * Find the length of the longest extent in an AG.
1728 */
1729 xfs_extlen_t
1730 xfs_alloc_longest_free_extent(
1731 struct xfs_mount *mp,
1732 struct xfs_perag *pag)
1733 {
1734 xfs_extlen_t need, delta = 0;
1735
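/*
 * Refilling the freelist to its minimum size may have to carve
 * blocks out of the longest free extent, so report the longest
 * extent minus that shortfall.  The fallback return collapses to
 * 1 if any free space remains at all, 0 otherwise.
 */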
1736 need = XFS_MIN_FREELIST_PAG(pag, mp);
1737 if (need > pag->pagf_flcount)
1738 delta = need - pag->pagf_flcount;
1739
1740 if (pag->pagf_longest > delta)
1741 return pag->pagf_longest - delta;
1742 return pag->pagf_flcount > 0 || pag->pagf_longest > 0;
1743 }
1744
1745 /*
1746 * Decide whether to use this allocation group for this allocation.
1747 * If so, fix up the btree freelist's size.
1748 */
1749 STATIC int /* error */
1750 xfs_alloc_fix_freelist(
1751 xfs_alloc_arg_t *args, /* allocation argument structure */
1752 int flags) /* XFS_ALLOC_FLAG_... */
1753 {
1754 xfs_buf_t *agbp; /* agf buffer pointer */
1755 xfs_agf_t *agf; /* a.g. freespace structure pointer */
1756 xfs_buf_t *agflbp;/* agfl buffer pointer */
1757 xfs_agblock_t bno; /* freelist block */
1758 xfs_extlen_t delta; /* new blocks needed in freelist */
1759 int error; /* error result code */
1760 xfs_extlen_t longest;/* longest extent in allocation group */
1761 xfs_mount_t *mp; /* file system mount point structure */
1762 xfs_extlen_t need; /* total blocks needed in freelist */
1763 xfs_perag_t *pag; /* per-ag information structure */
1764 xfs_alloc_arg_t targs; /* local allocation arguments */
1765 xfs_trans_t *tp; /* transaction pointer */
1766
1767 mp = args->mp;
1768
1769 pag = args->pag;
1770 tp = args->tp;
1771 if (!pag->pagf_init) {
1772 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1773 &agbp)))
1774 return error;
1775 if (!pag->pagf_init) {
1776 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1777 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1778 args->agbp = NULL;
1779 return 0;
1780 }
1781 } else
1782 agbp = NULL;
1783
1784 /*
1785 * If this is a metadata preferred pag and we are allocating user
1786 * data, then try somewhere else unless we are being asked to
1787 * try harder at this point.
1788 */
1789 if (pag->pagf_metadata && args->userdata &&
1790 (flags & XFS_ALLOC_FLAG_TRYLOCK)) {
1791 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1792 args->agbp = NULL;
1793 return 0;
1794 }
1795
1796 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1797 /*
1798 * If it looks like there isn't a long enough extent, or enough
1799 * total blocks, reject it.
1800 */
1801 need = XFS_MIN_FREELIST_PAG(pag, mp);
1802 longest = xfs_alloc_longest_free_extent(mp, pag);
1803 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1804 longest ||
1805 ((int)(pag->pagf_freeblks + pag->pagf_flcount -
1806 need - args->total) < (int)args->minleft)) {
1807 if (agbp)
1808 xfs_trans_brelse(tp, agbp);
1809 args->agbp = NULL;
1810 return 0;
1811 }
1812 }
1813
1814 /*
1815 * Get the a.g. freespace buffer.
1816 * Can fail if we're not blocking on locks, and it's held.
1817 */
1818 if (agbp == NULL) {
1819 if ((error = xfs_alloc_read_agf(mp, tp, args->agno, flags,
1820 &agbp)))
1821 return error;
1822 if (agbp == NULL) {
1823 ASSERT(flags & XFS_ALLOC_FLAG_TRYLOCK);
1824 ASSERT(!(flags & XFS_ALLOC_FLAG_FREEING));
1825 args->agbp = NULL;
1826 return 0;
1827 }
1828 }
1829 /*
1830 * Figure out how many blocks we should have in the freelist.
1831 */
1832 agf = XFS_BUF_TO_AGF(agbp);
1833 need = XFS_MIN_FREELIST(agf, mp);
1834 /*
1835 * If there aren't enough total free blocks, or no single extent is long enough, reject it.
1836 */
1837 if (!(flags & XFS_ALLOC_FLAG_FREEING)) {
1838 delta = need > be32_to_cpu(agf->agf_flcount) ?
1839 (need - be32_to_cpu(agf->agf_flcount)) : 0;
1840 longest = be32_to_cpu(agf->agf_longest);
1841 longest = (longest > delta) ? (longest - delta) :
1842 (be32_to_cpu(agf->agf_flcount) > 0 || longest > 0);
1843 if ((args->minlen + args->alignment + args->minalignslop - 1) >
1844 longest ||
1845 ((int)(be32_to_cpu(agf->agf_freeblks) +
1846 be32_to_cpu(agf->agf_flcount) - need - args->total) <
1847 (int)args->minleft)) {
1848 xfs_trans_brelse(tp, agbp);
1849 args->agbp = NULL;
1850 return 0;
1851 }
1852 }
1853 /*
1854 * Make the freelist shorter if it's too long.
1855 */
1856 while (be32_to_cpu(agf->agf_flcount) > need) {
1857 xfs_buf_t *bp;
1858
1859 error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
1860 if (error)
1861 return error;
1862 if ((error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1)))
1863 return error;
1864 bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
1865 xfs_trans_binval(tp, bp);
1866 }
1867 /*
1868 * Initialize the args structure.
1869 */
1870 targs.tp = tp;
1871 targs.mp = mp;
1872 targs.agbp = agbp;
1873 targs.agno = args->agno;
1874 targs.mod = targs.minleft = targs.wasdel = targs.userdata =
1875 targs.minalignslop = 0;
1876 targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
1877 targs.type = XFS_ALLOCTYPE_THIS_AG;
1878 targs.pag = pag;
1879 if ((error = xfs_alloc_read_agfl(mp, tp, targs.agno, &agflbp)))
1880 return error;
1881 /*
1882 * Make the freelist longer if it's too short.
1883 */
1884 while (be32_to_cpu(agf->agf_flcount) < need) {
1885 targs.agbno = 0;
1886 targs.maxlen = need - be32_to_cpu(agf->agf_flcount);
1887 /*
1888 * Allocate as many blocks as possible at once.
1889 */
1890 if ((error = xfs_alloc_ag_vextent(&targs))) {
1891 xfs_trans_brelse(tp, agflbp);
1892 return error;
1893 }
1894 /*
1895 * Stop if we run out. Won't happen if callers are obeying
1896 * the restrictions correctly. Can happen for free calls
1897 * on a completely full ag.
1898 */
1899 if (targs.agbno == NULLAGBLOCK) {
1900 if (flags & XFS_ALLOC_FLAG_FREEING)
1901 break;
1902 xfs_trans_brelse(tp, agflbp);
1903 args->agbp = NULL;
1904 return 0;
1905 }
1906 /*
1907 * Put each allocated block on the list.
1908 */
1909 for (bno = targs.agbno; bno < targs.agbno + targs.len; bno++) {
1910 error = xfs_alloc_put_freelist(tp, agbp,
1911 agflbp, bno, 0);
1912 if (error)
1913 return error;
1914 }
1915 }
1916 xfs_trans_brelse(tp, agflbp);
1917 args->agbp = agbp;
1918 return 0;
1919 }
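/*
 * Caller pattern for xfs_alloc_fix_freelist() (a rough sketch of how the
 * allocation paths in this file use it, not a verbatim copy of any one
 * caller):
 *
 *	args->pag = xfs_perag_get(mp, args->agno);
 *	error = xfs_alloc_fix_freelist(args, flags);
 *	if (error)
 *		goto out_error;
 *	if (!args->agbp)
 *		-> AG rejected (or AGF trylock failed), try another AG
 *	else
 *		-> AGF is locked and the freelist is fixed up, so btree
 *		   splits during xfs_alloc_ag_vextent(args) cannot run
 *		   the freelist dry
 */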
1920
1921 /*
1922 * Get a block from the freelist.
1923 * Returns with the buffer for the block gotten.
1924 */
1925 int /* error */
1926 xfs_alloc_get_freelist(
1927 xfs_trans_t *tp, /* transaction pointer */
1928 xfs_buf_t *agbp, /* buffer containing the agf structure */
1929 xfs_agblock_t *bnop, /* block address retrieved from freelist */
1930 int btreeblk) /* destination is an AGF btree */
1931 {
1932 xfs_agf_t *agf; /* a.g. freespace structure */
1933 xfs_agfl_t *agfl; /* a.g. freelist structure */
1934 xfs_buf_t *agflbp;/* buffer for a.g. freelist structure */
1935 xfs_agblock_t bno; /* block number returned */
1936 int error;
1937 int logflags;
1938 xfs_mount_t *mp; /* mount structure */
1939 xfs_perag_t *pag; /* per allocation group data */
1940
1941 agf = XFS_BUF_TO_AGF(agbp);
1942 /*
1943 * Freelist is empty, give up.
1944 */
1945 if (!agf->agf_flcount) {
1946 *bnop = NULLAGBLOCK;
1947 return 0;
1948 }
1949 /*
1950 * Read the array of free blocks.
1951 */
1952 mp = tp->t_mountp;
1953 if ((error = xfs_alloc_read_agfl(mp, tp,
1954 be32_to_cpu(agf->agf_seqno), &agflbp)))
1955 return error;
1956 agfl = XFS_BUF_TO_AGFL(agflbp);
1957 /*
1958 * Get the block number and update the data structures.
1959 */
1960 bno = be32_to_cpu(agfl->agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
1961 be32_add_cpu(&agf->agf_flfirst, 1);
1962 xfs_trans_brelse(tp, agflbp);
1963 if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
1964 agf->agf_flfirst = 0;
1965
1966 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
1967 be32_add_cpu(&agf->agf_flcount, -1);
1968 xfs_trans_agflist_delta(tp, -1);
1969 pag->pagf_flcount--;
1970 xfs_perag_put(pag);
1971
1972 logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
1973 if (btreeblk) {
1974 be32_add_cpu(&agf->agf_btreeblks, 1);
1975 pag->pagf_btreeblks++;
1976 logflags |= XFS_AGF_BTREEBLKS;
1977 }
1978
1979 xfs_alloc_log_agf(tp, agbp, logflags);
1980 *bnop = bno;
1981
1982 return 0;
1983 }
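/*
 * Illustration (editor's example with made-up numbers): the AGFL is a
 * circular array of XFS_AGFL_SIZE(mp) block numbers, indexed by
 * agf_flfirst (oldest entry) and agf_fllast (newest entry).  If
 * XFS_AGFL_SIZE(mp) were 128 and agf_flfirst were 127, the code above
 * would hand out agfl_bno[127], advance agf_flfirst to 128 and wrap it
 * back to 0, and decrement agf_flcount; with btreeblk set, the block is
 * additionally accounted in agf_btreeblks as being in use by the AGF
 * btrees.
 */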
1984
1985 /*
1986 * Log the given fields from the agf structure.
1987 */
1988 void
1989 xfs_alloc_log_agf(
1990 xfs_trans_t *tp, /* transaction pointer */
1991 xfs_buf_t *bp, /* buffer for a.g. freelist header */
1992 int fields) /* mask of fields to be logged (XFS_AGF_...) */
1993 {
1994 int first; /* first byte offset */
1995 int last; /* last byte offset */
1996 static const short offsets[] = {
1997 offsetof(xfs_agf_t, agf_magicnum),
1998 offsetof(xfs_agf_t, agf_versionnum),
1999 offsetof(xfs_agf_t, agf_seqno),
2000 offsetof(xfs_agf_t, agf_length),
2001 offsetof(xfs_agf_t, agf_roots[0]),
2002 offsetof(xfs_agf_t, agf_levels[0]),
2003 offsetof(xfs_agf_t, agf_flfirst),
2004 offsetof(xfs_agf_t, agf_fllast),
2005 offsetof(xfs_agf_t, agf_flcount),
2006 offsetof(xfs_agf_t, agf_freeblks),
2007 offsetof(xfs_agf_t, agf_longest),
2008 offsetof(xfs_agf_t, agf_btreeblks),
2009 sizeof(xfs_agf_t)
2010 };
2011
2012 trace_xfs_agf(tp->t_mountp, XFS_BUF_TO_AGF(bp), fields, _RET_IP_);
2013
2014 xfs_btree_offsets(fields, offsets, XFS_AGF_NUM_BITS, &first, &last);
2015 xfs_trans_log_buf(tp, bp, (uint)first, (uint)last);
2016 }
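/*
 * Example of how the offsets table above is used (editor's note, based on
 * xfs_btree_offsets() converting a field bitmask into one contiguous byte
 * range): logging
 *
 *	xfs_alloc_log_agf(tp, agbp, XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT);
 *
 * results in a single xfs_trans_log_buf() call covering the bytes from
 * offsetof(xfs_agf_t, agf_flfirst) up to
 * offsetof(xfs_agf_t, agf_freeblks) - 1, i.e. everything between the lowest
 * and highest dirtied fields (including agf_fllast in between) is logged as
 * one range.
 */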
2017
2018 /*
2019 * Interface for inode allocation to force the pag data to be initialized.
2020 */
2021 int /* error */
2022 xfs_alloc_pagf_init(
2023 xfs_mount_t *mp, /* file system mount structure */
2024 xfs_trans_t *tp, /* transaction pointer */
2025 xfs_agnumber_t agno, /* allocation group number */
2026 int flags) /* XFS_ALLOC_FLAG_... */
2027 {
2028 xfs_buf_t *bp;
2029 int error;
2030
2031 if ((error = xfs_alloc_read_agf(mp, tp, agno, flags, &bp)))
2032 return error;
2033 if (bp)
2034 xfs_trans_brelse(tp, bp);
2035 return 0;
2036 }
2037
2038 /*
2039 * Put the block on the freelist for the allocation group.
2040 */
2041 int /* error */
2042 xfs_alloc_put_freelist(
2043 xfs_trans_t *tp, /* transaction pointer */
2044 xfs_buf_t *agbp, /* buffer for a.g. freelist header */
2045 xfs_buf_t *agflbp,/* buffer for a.g. free block array */
2046 xfs_agblock_t bno, /* block being freed */
2047 int btreeblk) /* block came from an AGF btree */
2048 {
2049 xfs_agf_t *agf; /* a.g. freespace structure */
2050 xfs_agfl_t *agfl; /* a.g. free block array */
2051 __be32 *blockp;/* pointer to array entry */
2052 int error;
2053 int logflags;
2054 xfs_mount_t *mp; /* mount structure */
2055 xfs_perag_t *pag; /* per allocation group data */
2056
2057 agf = XFS_BUF_TO_AGF(agbp);
2058 mp = tp->t_mountp;
2059
2060 if (!agflbp && (error = xfs_alloc_read_agfl(mp, tp,
2061 be32_to_cpu(agf->agf_seqno), &agflbp)))
2062 return error;
2063 agfl = XFS_BUF_TO_AGFL(agflbp);
2064 be32_add_cpu(&agf->agf_fllast, 1);
2065 if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
2066 agf->agf_fllast = 0;
2067
2068 pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
2069 be32_add_cpu(&agf->agf_flcount, 1);
2070 xfs_trans_agflist_delta(tp, 1);
2071 pag->pagf_flcount++;
2072
2073 logflags = XFS_AGF_FLLAST | XFS_AGF_FLCOUNT;
2074 if (btreeblk) {
2075 be32_add_cpu(&agf->agf_btreeblks, -1);
2076 pag->pagf_btreeblks--;
2077 logflags |= XFS_AGF_BTREEBLKS;
2078 }
2079 xfs_perag_put(pag);
2080
2081 xfs_alloc_log_agf(tp, agbp, logflags);
2082
2083 ASSERT(be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp));
2084 blockp = &agfl->agfl_bno[be32_to_cpu(agf->agf_fllast)];
2085 *blockp = cpu_to_be32(bno);
2086 xfs_alloc_log_agf(tp, agbp, logflags);
2087 xfs_trans_log_buf(tp, agflbp,
2088 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl),
2089 (int)((xfs_caddr_t)blockp - (xfs_caddr_t)agfl +
2090 sizeof(xfs_agblock_t) - 1));
2091 return 0;
2092 }
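/*
 * Note on the final xfs_trans_log_buf() call above (editor's illustration):
 * only the single modified AGFL slot is logged.  Each agfl_bno[] entry is a
 * 4-byte big-endian block number, so if agf_fllast ended up as, say, 37
 * (a made-up value), the logged byte range of the AGFL buffer would be
 * [37 * 4, 37 * 4 + 3].
 */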
2093
2094 /*
2095 * Read in the allocation group header (free/alloc section).
2096 */
2097 int /* error */
2098 xfs_read_agf(
2099 struct xfs_mount *mp, /* mount point structure */
2100 struct xfs_trans *tp, /* transaction pointer */
2101 xfs_agnumber_t agno, /* allocation group number */
2102 int flags, /* XFS_BUF_ */
2103 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2104 {
2105 struct xfs_agf *agf; /* ag freelist header */
2106 int agf_ok; /* set if agf is consistent */
2107 int error;
2108
2109 ASSERT(agno != NULLAGNUMBER);
2110 error = xfs_trans_read_buf(
2111 mp, tp, mp->m_ddev_targp,
2112 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
2113 XFS_FSS_TO_BB(mp, 1), flags, bpp);
2114 if (error)
2115 return error;
2116 if (!*bpp)
2117 return 0;
2118
2119 ASSERT(!(*bpp)->b_error);
2120 agf = XFS_BUF_TO_AGF(*bpp);
2121
2122 /*
2123 * Validate the magic number of the agf block.
2124 */
2125 agf_ok =
2126 agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
2127 XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
2128 be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
2129 be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
2130 be32_to_cpu(agf->agf_fllast) < XFS_AGFL_SIZE(mp) &&
2131 be32_to_cpu(agf->agf_flcount) <= XFS_AGFL_SIZE(mp) &&
2132 be32_to_cpu(agf->agf_seqno) == agno;
2133 if (xfs_sb_version_haslazysbcount(&mp->m_sb))
2134 agf_ok = agf_ok && be32_to_cpu(agf->agf_btreeblks) <=
2135 be32_to_cpu(agf->agf_length);
2136 if (unlikely(XFS_TEST_ERROR(!agf_ok, mp, XFS_ERRTAG_ALLOC_READ_AGF,
2137 XFS_RANDOM_ALLOC_READ_AGF))) {
2138 XFS_CORRUPTION_ERROR("xfs_alloc_read_agf",
2139 XFS_ERRLEVEL_LOW, mp, agf);
2140 xfs_trans_brelse(tp, *bpp);
2141 return XFS_ERROR(EFSCORRUPTED);
2142 }
2143 xfs_buf_set_ref(*bpp, XFS_AGF_REF);
2144 return 0;
2145 }
2146
2147 /*
2148 * Read in the allocation group header (free/alloc section).
2149 */
2150 int /* error */
2151 xfs_alloc_read_agf(
2152 struct xfs_mount *mp, /* mount point structure */
2153 struct xfs_trans *tp, /* transaction pointer */
2154 xfs_agnumber_t agno, /* allocation group number */
2155 int flags, /* XFS_ALLOC_FLAG_... */
2156 struct xfs_buf **bpp) /* buffer for the ag freelist header */
2157 {
2158 struct xfs_agf *agf; /* ag freelist header */
2159 struct xfs_perag *pag; /* per allocation group data */
2160 int error;
2161
2162 ASSERT(agno != NULLAGNUMBER);
2163
2164 error = xfs_read_agf(mp, tp, agno,
2165 (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
2166 bpp);
2167 if (error)
2168 return error;
2169 if (!*bpp)
2170 return 0;
2171 ASSERT(!(*bpp)->b_error);
2172
2173 agf = XFS_BUF_TO_AGF(*bpp);
2174 pag = xfs_perag_get(mp, agno);
2175 if (!pag->pagf_init) {
2176 pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
2177 pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
2178 pag->pagf_flcount = be32_to_cpu(agf->agf_flcount);
2179 pag->pagf_longest = be32_to_cpu(agf->agf_longest);
2180 pag->pagf_levels[XFS_BTNUM_BNOi] =
2181 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
2182 pag->pagf_levels[XFS_BTNUM_CNTi] =
2183 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
2184 spin_lock_init(&pag->pagb_lock);
2185 pag->pagb_count = 0;
2186 pag->pagb_tree = RB_ROOT;
2187 pag->pagf_init = 1;
2188 }
2189 #ifdef DEBUG
2190 else if (!XFS_FORCED_SHUTDOWN(mp)) {
2191 ASSERT(pag->pagf_freeblks == be32_to_cpu(agf->agf_freeblks));
2192 ASSERT(pag->pagf_btreeblks == be32_to_cpu(agf->agf_btreeblks));
2193 ASSERT(pag->pagf_flcount == be32_to_cpu(agf->agf_flcount));
2194 ASSERT(pag->pagf_longest == be32_to_cpu(agf->agf_longest));
2195 ASSERT(pag->pagf_levels[XFS_BTNUM_BNOi] ==
2196 be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]));
2197 ASSERT(pag->pagf_levels[XFS_BTNUM_CNTi] ==
2198 be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
2199 }
2200 #endif
2201 xfs_perag_put(pag);
2202 return 0;
2203 }
2204
2205 /*
2206 * Allocate an extent (variable-size).
2207 * Depending on the allocation type, we either look in a single allocation
2208 * group or loop over the allocation groups to find the result.
2209 */
2210 int /* error */
2211 __xfs_alloc_vextent(
2212 xfs_alloc_arg_t *args) /* allocation argument structure */
2213 {
2214 xfs_agblock_t agsize; /* allocation group size */
2215 int error;
2216 int flags; /* XFS_ALLOC_FLAG_... locking flags */
2217 xfs_extlen_t minleft;/* minimum left value, temp copy */
2218 xfs_mount_t *mp; /* mount structure pointer */
2219 xfs_agnumber_t sagno; /* starting allocation group number */
2220 xfs_alloctype_t type; /* input allocation type */
2221 int bump_rotor = 0;
2222 int no_min = 0;
2223 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
2224
2225 mp = args->mp;
2226 type = args->otype = args->type;
2227 args->agbno = NULLAGBLOCK;
2228 /*
2229 * Just fix this up, for the case where the last a.g. is shorter
2230 * (or there's only one a.g.) and the caller couldn't easily figure
2231 * that out (xfs_bmap_alloc).
2232 */
2233 agsize = mp->m_sb.sb_agblocks;
2234 if (args->maxlen > agsize)
2235 args->maxlen = agsize;
2236 if (args->alignment == 0)
2237 args->alignment = 1;
2238 ASSERT(XFS_FSB_TO_AGNO(mp, args->fsbno) < mp->m_sb.sb_agcount);
2239 ASSERT(XFS_FSB_TO_AGBNO(mp, args->fsbno) < agsize);
2240 ASSERT(args->minlen <= args->maxlen);
2241 ASSERT(args->minlen <= agsize);
2242 ASSERT(args->mod < args->prod);
2243 if (XFS_FSB_TO_AGNO(mp, args->fsbno) >= mp->m_sb.sb_agcount ||
2244 XFS_FSB_TO_AGBNO(mp, args->fsbno) >= agsize ||
2245 args->minlen > args->maxlen || args->minlen > agsize ||
2246 args->mod >= args->prod) {
2247 args->fsbno = NULLFSBLOCK;
2248 trace_xfs_alloc_vextent_badargs(args);
2249 return 0;
2250 }
2251 minleft = args->minleft;
2252
2253 switch (type) {
2254 case XFS_ALLOCTYPE_THIS_AG:
2255 case XFS_ALLOCTYPE_NEAR_BNO:
2256 case XFS_ALLOCTYPE_THIS_BNO:
2257 /*
2258 * These three force us into a single a.g.
2259 */
2260 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2261 args->pag = xfs_perag_get(mp, args->agno);
2262 args->minleft = 0;
2263 error = xfs_alloc_fix_freelist(args, 0);
2264 args->minleft = minleft;
2265 if (error) {
2266 trace_xfs_alloc_vextent_nofix(args);
2267 goto error0;
2268 }
2269 if (!args->agbp) {
2270 trace_xfs_alloc_vextent_noagbp(args);
2271 break;
2272 }
2273 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2274 if ((error = xfs_alloc_ag_vextent(args)))
2275 goto error0;
2276 break;
2277 case XFS_ALLOCTYPE_START_BNO:
2278 /*
2279 * Try near allocation first, then anywhere-in-ag after
2280 * the first a.g. fails.
2281 */
2282 if ((args->userdata == XFS_ALLOC_INITIAL_USER_DATA) &&
2283 (mp->m_flags & XFS_MOUNT_32BITINODES)) {
2284 args->fsbno = XFS_AGB_TO_FSB(mp,
2285 ((mp->m_agfrotor / rotorstep) %
2286 mp->m_sb.sb_agcount), 0);
2287 bump_rotor = 1;
2288 }
2289 args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
2290 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2291 /* FALLTHROUGH */
2292 case XFS_ALLOCTYPE_ANY_AG:
2293 case XFS_ALLOCTYPE_START_AG:
2294 case XFS_ALLOCTYPE_FIRST_AG:
2295 /*
2296 * Rotate through the allocation groups looking for a winner.
2297 */
2298 if (type == XFS_ALLOCTYPE_ANY_AG) {
2299 /*
2300 * Start with the last place we left off.
2301 */
2302 args->agno = sagno = (mp->m_agfrotor / rotorstep) %
2303 mp->m_sb.sb_agcount;
2304 args->type = XFS_ALLOCTYPE_THIS_AG;
2305 flags = XFS_ALLOC_FLAG_TRYLOCK;
2306 } else if (type == XFS_ALLOCTYPE_FIRST_AG) {
2307 /*
2308 * Start with allocation group given by bno.
2309 */
2310 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2311 args->type = XFS_ALLOCTYPE_THIS_AG;
2312 sagno = 0;
2313 flags = 0;
2314 } else {
2315 if (type == XFS_ALLOCTYPE_START_AG)
2316 args->type = XFS_ALLOCTYPE_THIS_AG;
2317 /*
2318 * Start with the given allocation group.
2319 */
2320 args->agno = sagno = XFS_FSB_TO_AGNO(mp, args->fsbno);
2321 flags = XFS_ALLOC_FLAG_TRYLOCK;
2322 }
2323 /*
2324 * Loop over allocation groups twice; first time with
2325 * trylock set, second time without.
2326 */
2327 for (;;) {
2328 args->pag = xfs_perag_get(mp, args->agno);
2329 if (no_min) args->minleft = 0;
2330 error = xfs_alloc_fix_freelist(args, flags);
2331 args->minleft = minleft;
2332 if (error) {
2333 trace_xfs_alloc_vextent_nofix(args);
2334 goto error0;
2335 }
2336 /*
2337 * If we get a buffer back then the allocation will fly.
2338 */
2339 if (args->agbp) {
2340 if ((error = xfs_alloc_ag_vextent(args)))
2341 goto error0;
2342 break;
2343 }
2344
2345 trace_xfs_alloc_vextent_loopfailed(args);
2346
2347 /*
2348 * Didn't work, figure out the next iteration.
2349 */
2350 if (args->agno == sagno &&
2351 type == XFS_ALLOCTYPE_START_BNO)
2352 args->type = XFS_ALLOCTYPE_THIS_AG;
2353 /*
2354 * For the first allocation, we can try any AG to get
2355 * space. However, if we already have allocated a
2356 * block, we don't want to try AGs whose number is below
2357 * sagno. Otherwise, we may end up with out-of-order
2358 * locking of AGF, which might cause deadlock.
2359 */
2360 if (++(args->agno) == mp->m_sb.sb_agcount) {
2361 if (args->firstblock != NULLFSBLOCK)
2362 args->agno = sagno;
2363 else
2364 args->agno = 0;
2365 }
2366 /*
2367 * Reached the starting a.g., must either be done
2368 * or switch to non-trylock mode.
2369 */
2370 if (args->agno == sagno) {
2371 if (no_min == 1) {
2372 args->agbno = NULLAGBLOCK;
2373 trace_xfs_alloc_vextent_allfailed(args);
2374 break;
2375 }
2376 if (flags == 0) {
2377 no_min = 1;
2378 } else {
2379 flags = 0;
2380 if (type == XFS_ALLOCTYPE_START_BNO) {
2381 args->agbno = XFS_FSB_TO_AGBNO(mp,
2382 args->fsbno);
2383 args->type = XFS_ALLOCTYPE_NEAR_BNO;
2384 }
2385 }
2386 }
2387 xfs_perag_put(args->pag);
2388 }
2389 if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
2390 if (args->agno == sagno)
2391 mp->m_agfrotor = (mp->m_agfrotor + 1) %
2392 (mp->m_sb.sb_agcount * rotorstep);
2393 else
2394 mp->m_agfrotor = (args->agno * rotorstep + 1) %
2395 (mp->m_sb.sb_agcount * rotorstep);
2396 }
2397 break;
2398 default:
2399 ASSERT(0);
2400 /* NOTREACHED */
2401 }
2402 if (args->agbno == NULLAGBLOCK)
2403 args->fsbno = NULLFSBLOCK;
2404 else {
2405 args->fsbno = XFS_AGB_TO_FSB(mp, args->agno, args->agbno);
2406 #ifdef DEBUG
2407 ASSERT(args->len >= args->minlen);
2408 ASSERT(args->len <= args->maxlen);
2409 ASSERT(args->agbno % args->alignment == 0);
2410 XFS_AG_CHECK_DADDR(mp, XFS_FSB_TO_DADDR(mp, args->fsbno),
2411 args->len);
2412 #endif
2413 }
2414 xfs_perag_put(args->pag);
2415 return 0;
2416 error0:
2417 xfs_perag_put(args->pag);
2418 return error;
2419 }
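/*
 * Iteration example for the multi-AG cases above (editor's sketch with
 * made-up numbers): with sb_agcount = 4, a starting AG of sagno = 2 and no
 * prior allocation in the transaction (args->firstblock == NULLFSBLOCK),
 * the loop visits AGs 2, 3, 0, 1 with the AGF trylock flag set; if that
 * fails it repeats the sweep blocking on the AGF locks (flags == 0), and if
 * that also fails it makes one final sweep with args->minleft forced to 0
 * before giving up.  Once a block has already been allocated in the
 * transaction, the wrap-around goes back to sagno instead of AG 0, so AGF
 * locks are always taken in ascending order.
 */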
2420
2421 static void
2422 xfs_alloc_vextent_worker(
2423 struct work_struct *work)
2424 {
2425 struct xfs_alloc_arg *args = container_of(work,
2426 struct xfs_alloc_arg, work);
2427 unsigned long pflags;
2428
2429 /* we are in a transaction context here */
2430 current_set_flags_nested(&pflags, PF_FSTRANS);
2431
2432 args->result = __xfs_alloc_vextent(args);
2433 complete(args->done);
2434
2435 current_restore_flags_nested(&pflags, PF_FSTRANS);
2436 }
2437
2438
2439 int /* error */
2440 xfs_alloc_vextent(
2441 xfs_alloc_arg_t *args) /* allocation argument structure */
2442 {
2443 DECLARE_COMPLETION_ONSTACK(done);
2444
2445 args->done = &done;
2446 INIT_WORK(&args->work, xfs_alloc_vextent_worker);
2447 queue_work(xfs_alloc_wq, &args->work);
2448 wait_for_completion(&done);
2449 return args->result;
2450 }
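/*
 * Caller sketch (editor's illustration; the field values are made up and
 * every optional field is left at zero by the memset):
 *
 *	xfs_alloc_arg_t	args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.tp = tp;
 *	args.mp = mp;
 *	args.fsbno = target_fsb;
 *	args.type = XFS_ALLOCTYPE_START_BNO;
 *	args.minlen = 1;
 *	args.maxlen = 16;
 *	args.prod = 1;
 *	error = xfs_alloc_vextent(&args);
 *	if (!error && args.fsbno != NULLFSBLOCK)
 *		-> the extent [args.fsbno, args.fsbno + args.len) now
 *		   belongs to the transaction
 *
 * The work item indirection simply runs __xfs_alloc_vextent() on the
 * xfs_alloc_wq worker thread's stack while the submitting thread waits on
 * the completion.
 */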
2451
2452 /*
2453 * Free an extent.
2454 * Just break up the extent address and hand off to xfs_free_ag_extent
2455 * after fixing up the freelist.
2456 */
2457 int /* error */
2458 xfs_free_extent(
2459 xfs_trans_t *tp, /* transaction pointer */
2460 xfs_fsblock_t bno, /* starting block number of extent */
2461 xfs_extlen_t len) /* length of extent */
2462 {
2463 xfs_alloc_arg_t args;
2464 int error;
2465
2466 ASSERT(len != 0);
2467 memset(&args, 0, sizeof(xfs_alloc_arg_t));
2468 args.tp = tp;
2469 args.mp = tp->t_mountp;
2470
2471 /*
2472 * Validate that the block number is legal - this enables us to detect
2473 * and handle silent filesystem corruption rather than crashing.
2474 */
2474 */
2475 args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
2476 if (args.agno >= args.mp->m_sb.sb_agcount)
2477 return EFSCORRUPTED;
2478
2479 args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
2480 if (args.agbno >= args.mp->m_sb.sb_agblocks)
2481 return EFSCORRUPTED;
2482
2483 args.pag = xfs_perag_get(args.mp, args.agno);
2484 ASSERT(args.pag);
2485
2486 error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
2487 if (error)
2488 goto error0;
2489
2490 /* validate the extent size is legal now we have the agf locked */
2491 if (args.agbno + len >
2492 be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
2493 error = EFSCORRUPTED;
2494 goto error0;
2495 }
2496
2497 error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
2498 if (!error)
2499 xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
2500 error0:
2501 xfs_perag_put(args.pag);
2502 return error;
2503 }
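/*
 * Usage sketch (editor's note): a caller holding a dirty transaction frees
 * an extent with something like
 *
 *	error = xfs_free_extent(tp, fsbno, len);
 *
 * and on success the blocks are returned to the AG btrees while the
 * matching busy-extent record keeps them from being handed back out for
 * user data until the freeing transaction has committed to the log.
 */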
2504
2505 void
2506 xfs_alloc_busy_insert(
2507 struct xfs_trans *tp,
2508 xfs_agnumber_t agno,
2509 xfs_agblock_t bno,
2510 xfs_extlen_t len,
2511 unsigned int flags)
2512 {
2513 struct xfs_busy_extent *new;
2514 struct xfs_busy_extent *busyp;
2515 struct xfs_perag *pag;
2516 struct rb_node **rbp;
2517 struct rb_node *parent = NULL;
2518
2519 new = kmem_zalloc(sizeof(struct xfs_busy_extent), KM_MAYFAIL);
2520 if (!new) {
2521 /*
2522 * No Memory! Since it is now not possible to track the free
2523 * block, make this a synchronous transaction to ensure that
2524 * the block is not reused before this transaction commits.
2525 */
2526 trace_xfs_alloc_busy_enomem(tp->t_mountp, agno, bno, len);
2527 xfs_trans_set_sync(tp);
2528 return;
2529 }
2530
2531 new->agno = agno;
2532 new->bno = bno;
2533 new->length = len;
2534 INIT_LIST_HEAD(&new->list);
2535 new->flags = flags;
2536
2537 /* trace before insert to be able to see failed inserts */
2538 trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
2539
2540 pag = xfs_perag_get(tp->t_mountp, new->agno);
2541 spin_lock(&pag->pagb_lock);
2542 rbp = &pag->pagb_tree.rb_node;
2543 while (*rbp) {
2544 parent = *rbp;
2545 busyp = rb_entry(parent, struct xfs_busy_extent, rb_node);
2546
2547 if (new->bno < busyp->bno) {
2548 rbp = &(*rbp)->rb_left;
2549 ASSERT(new->bno + new->length <= busyp->bno);
2550 } else if (new->bno > busyp->bno) {
2551 rbp = &(*rbp)->rb_right;
2552 ASSERT(bno >= busyp->bno + busyp->length);
2553 } else {
2554 ASSERT(0);
2555 }
2556 }
2557
2558 rb_link_node(&new->rb_node, parent, rbp);
2559 rb_insert_color(&new->rb_node, &pag->pagb_tree);
2560
2561 list_add(&new->list, &tp->t_busy);
2562 spin_unlock(&pag->pagb_lock);
2563 xfs_perag_put(pag);
2564 }
2565
2566 /*
2567 * Search for a busy extent within the range of the extent we are about to
2568 * allocate. xfs_alloc_busy_search() takes the per-AG busy extent tree lock
2569 * itself. This function returns 0 for no overlapping busy extent, -1 for an
2570 * overlapping but not exact busy extent, and 1 for an exact match. This is
2571 * done so that a non-zero return indicates an overlap that will require a
2572 * synchronous transaction, but it can still be used to distinguish between
2573 * a partial and an exact match.
2574 */
2575 int
2576 xfs_alloc_busy_search(
2577 struct xfs_mount *mp,
2578 xfs_agnumber_t agno,
2579 xfs_agblock_t bno,
2580 xfs_extlen_t len)
2581 {
2582 struct xfs_perag *pag;
2583 struct rb_node *rbp;
2584 struct xfs_busy_extent *busyp;
2585 int match = 0;
2586
2587 pag = xfs_perag_get(mp, agno);
2588 spin_lock(&pag->pagb_lock);
2589
2590 rbp = pag->pagb_tree.rb_node;
2591
2592 /* find closest start bno overlap */
2593 while (rbp) {
2594 busyp = rb_entry(rbp, struct xfs_busy_extent, rb_node);
2595 if (bno < busyp->bno) {
2596 /* may overlap, but exact start block is lower */
2597 if (bno + len > busyp->bno)
2598 match = -1;
2599 rbp = rbp->rb_left;
2600 } else if (bno > busyp->bno) {
2601 /* may overlap, but exact start block is higher */
2602 if (bno < busyp->bno + busyp->length)
2603 match = -1;
2604 rbp = rbp->rb_right;
2605 } else {
2606 /* bno matches busyp, length determines exact match */
2607 match = (busyp->length == len) ? 1 : -1;
2608 break;
2609 }
2610 }
2611 spin_unlock(&pag->pagb_lock);
2612 xfs_perag_put(pag);
2613 return match;
2614 }
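/*
 * Usage sketch for the return value convention (editor's illustration):
 *
 *	switch (xfs_alloc_busy_search(mp, agno, bno, len)) {
 *	case 0:		no busy extent overlaps [bno, bno + len)
 *	case 1:		the range exactly matches one busy extent
 *	case -1:	the range overlaps busy extents but is not an exact match
 *	}
 *
 * so "!= 0" is the cheap "needs a synchronous transaction" test, while the
 * sign still distinguishes exact from partial overlap.
 */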
2615
2616 /*
2617 * The found free extent [fbno, fend] overlaps part or all of the given busy
2618 * extent. If the overlap covers the beginning, the end, or all of the busy
2619 * extent, the overlapping portion can be made unbusy and used for the
2620 * allocation. We can't split a busy extent because we can't modify a
2621 * transaction/CIL context busy list, but we can update an entry's block
2622 * number or length.
2623 *
2624 * Returns true if the extent can safely be reused, or false if the search
2625 * needs to be restarted.
2626 */
2627 STATIC bool
2628 xfs_alloc_busy_update_extent(
2629 struct xfs_mount *mp,
2630 struct xfs_perag *pag,
2631 struct xfs_busy_extent *busyp,
2632 xfs_agblock_t fbno,
2633 xfs_extlen_t flen,
2634 bool userdata)
2635 {
2636 xfs_agblock_t fend = fbno + flen;
2637 xfs_agblock_t bbno = busyp->bno;
2638 xfs_agblock_t bend = bbno + busyp->length;
2639
2640 /*
2641 * This extent is currently being discarded. Give the thread
2642 * performing the discard a chance to mark the extent unbusy
2643 * and retry.
2644 */
2645 if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
2646 spin_unlock(&pag->pagb_lock);
2647 delay(1);
2648 spin_lock(&pag->pagb_lock);
2649 return false;
2650 }
2651
2652 /*
2653 * If there is a busy extent overlapping a user allocation, we have
2654 * no choice but to force the log and retry the search.
2655 *
2656 * Fortunately this does not happen during normal operation, but
2657 * only if the filesystem is very low on space and has to dip into
2658 * the AGFL for normal allocations.
2659 */
2660 if (userdata)
2661 goto out_force_log;
2662
2663 if (bbno < fbno && bend > fend) {
2664 /*
2665 * Case 1:
2666 * bbno bend
2667 * +BBBBBBBBBBBBBBBBB+
2668 * +---------+
2669 * fbno fend
2670 */
2671
2672 /*
2673 * We would have to split the busy extent to be able to track
2674 * it correctly, which we cannot do because we would have to
2675 * modify the list of busy extents attached to the transaction
2676 * or CIL context, which is immutable.
2677 *
2678 * Force out the log to clear the busy extent and retry the
2679 * search.
2680 */
2681 goto out_force_log;
2682 } else if (bbno >= fbno && bend <= fend) {
2683 /*
2684 * Case 2:
2685 * bbno bend
2686 * +BBBBBBBBBBBBBBBBB+
2687 * +-----------------+
2688 * fbno fend
2689 *
2690 * Case 3:
2691 * bbno bend
2692 * +BBBBBBBBBBBBBBBBB+
2693 * +--------------------------+
2694 * fbno fend
2695 *
2696 * Case 4:
2697 * bbno bend
2698 * +BBBBBBBBBBBBBBBBB+
2699 * +--------------------------+
2700 * fbno fend
2701 *
2702 * Case 5:
2703 * bbno bend
2704 * +BBBBBBBBBBBBBBBBB+
2705 * +-----------------------------------+
2706 * fbno fend
2707 *
2708 */
2709
2710 /*
2711 * The busy extent is fully covered by the extent we are
2712 * allocating, and can simply be removed from the rbtree.
2713 * However we cannot remove it from the immutable list
2714 * tracking busy extents in the transaction or CIL context,
2715 * so set the length to zero to mark it invalid.
2716 *
2717 * We also need to restart the busy extent search from the
2718 * tree root, because erasing the node can rearrange the
2719 * tree topology.
2720 */
2721 rb_erase(&busyp->rb_node, &pag->pagb_tree);
2722 busyp->length = 0;
2723 return false;
2724 } else if (fend < bend) {
2725 /*
2726 * Case 6:
2727 * bbno bend
2728 * +BBBBBBBBBBBBBBBBB+
2729 * +---------+
2730 * fbno fend
2731 *
2732 * Case 7:
2733 * bbno bend
2734 * +BBBBBBBBBBBBBBBBB+
2735 * +------------------+
2736 * fbno fend
2737 *
2738 */
2739 busyp->bno = fend;
2740 } else if (bbno < fbno) {
2741 /*
2742 * Case 8:
2743 * bbno bend
2744 * +BBBBBBBBBBBBBBBBB+
2745 * +-------------+
2746 * fbno fend
2747 *
2748 * Case 9:
2749 * bbno bend
2750 * +BBBBBBBBBBBBBBBBB+
2751 * +----------------------+
2752 * fbno fend
2753 */
2754 busyp->length = fbno - busyp->bno;
2755 } else {
2756 ASSERT(0);
2757 }
2758
2759 trace_xfs_alloc_busy_reuse(mp, pag->pag_agno, fbno, flen);
2760 return true;
2761
2762 out_force_log:
2763 spin_unlock(&pag->pagb_lock);
2764 xfs_log_force(mp, XFS_LOG_SYNC);
2765 trace_xfs_alloc_busy_force(mp, pag->pag_agno, fbno, flen);
2766 spin_lock(&pag->pagb_lock);
2767 return false;
2768 }
2769
2770
2771 /*
2772 * For a given extent [fbno, flen], make sure we can reuse it safely.
2773 */
2774 void
2775 xfs_alloc_busy_reuse(
2776 struct xfs_mount *mp,
2777 xfs_agnumber_t agno,
2778 xfs_agblock_t fbno,
2779 xfs_extlen_t flen,
2780 bool userdata)
2781 {
2782 struct xfs_perag *pag;
2783 struct rb_node *rbp;
2784
2785 ASSERT(flen > 0);
2786
2787 pag = xfs_perag_get(mp, agno);
2788 spin_lock(&pag->pagb_lock);
2789 restart:
2790 rbp = pag->pagb_tree.rb_node;
2791 while (rbp) {
2792 struct xfs_busy_extent *busyp =
2793 rb_entry(rbp, struct xfs_busy_extent, rb_node);
2794 xfs_agblock_t bbno = busyp->bno;
2795 xfs_agblock_t bend = bbno + busyp->length;
2796
2797 if (fbno + flen <= bbno) {
2798 rbp = rbp->rb_left;
2799 continue;
2800 } else if (fbno >= bend) {
2801 rbp = rbp->rb_right;
2802 continue;
2803 }
2804
2805 if (!xfs_alloc_busy_update_extent(mp, pag, busyp, fbno, flen,
2806 userdata))
2807 goto restart;
2808 }
2809 spin_unlock(&pag->pagb_lock);
2810 xfs_perag_put(pag);
2811 }
2812
2813 /*
2814 * For a given extent [fbno, flen], search the busy extent list to find a
2815 * subset of the extent that is not busy. If *rlen is smaller than
2816 * args->minlen no suitable extent could be found, and the higher level
2817 * code needs to force out the log and retry the allocation.
2818 */
2819 STATIC void
2820 xfs_alloc_busy_trim(
2821 struct xfs_alloc_arg *args,
2822 xfs_agblock_t bno,
2823 xfs_extlen_t len,
2824 xfs_agblock_t *rbno,
2825 xfs_extlen_t *rlen)
2826 {
2827 xfs_agblock_t fbno;
2828 xfs_extlen_t flen;
2829 struct rb_node *rbp;
2830
2831 ASSERT(len > 0);
2832
2833 spin_lock(&args->pag->pagb_lock);
2834 restart:
2835 fbno = bno;
2836 flen = len;
2837 rbp = args->pag->pagb_tree.rb_node;
2838 while (rbp && flen >= args->minlen) {
2839 struct xfs_busy_extent *busyp =
2840 rb_entry(rbp, struct xfs_busy_extent, rb_node);
2841 xfs_agblock_t fend = fbno + flen;
2842 xfs_agblock_t bbno = busyp->bno;
2843 xfs_agblock_t bend = bbno + busyp->length;
2844
2845 if (fend <= bbno) {
2846 rbp = rbp->rb_left;
2847 continue;
2848 } else if (fbno >= bend) {
2849 rbp = rbp->rb_right;
2850 continue;
2851 }
2852
2853 /*
2854 * If this is a metadata allocation, try to reuse the busy
2855 * extent instead of trimming the allocation.
2856 */
2857 if (!args->userdata &&
2858 !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
2859 if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
2860 busyp, fbno, flen,
2861 false))
2862 goto restart;
2863 continue;
2864 }
2865
2866 if (bbno <= fbno) {
2867 /* start overlap */
2868
2869 /*
2870 * Case 1:
2871 * bbno bend
2872 * +BBBBBBBBBBBBBBBBB+
2873 * +---------+
2874 * fbno fend
2875 *
2876 * Case 2:
2877 * bbno bend
2878 * +BBBBBBBBBBBBBBBBB+
2879 * +-------------+
2880 * fbno fend
2881 *
2882 * Case 3:
2883 * bbno bend
2884 * +BBBBBBBBBBBBBBBBB+
2885 * +-------------+
2886 * fbno fend
2887 *
2888 * Case 4:
2889 * bbno bend
2890 * +BBBBBBBBBBBBBBBBB+
2891 * +-----------------+
2892 * fbno fend
2893 *
2894 * No unbusy region in extent, return failure.
2895 */
2896 if (fend <= bend)
2897 goto fail;
2898
2899 /*
2900 * Case 5:
2901 * bbno bend
2902 * +BBBBBBBBBBBBBBBBB+
2903 * +----------------------+
2904 * fbno fend
2905 *
2906 * Case 6:
2907 * bbno bend
2908 * +BBBBBBBBBBBBBBBBB+
2909 * +--------------------------+
2910 * fbno fend
2911 *
2912 * Needs to be trimmed to:
2913 * +-------+
2914 * fbno fend
2915 */
2916 fbno = bend;
2917 } else if (bend >= fend) {
2918 /* end overlap */
2919
2920 /*
2921 * Case 7:
2922 * bbno bend
2923 * +BBBBBBBBBBBBBBBBB+
2924 * +------------------+
2925 * fbno fend
2926 *
2927 * Case 8:
2928 * bbno bend
2929 * +BBBBBBBBBBBBBBBBB+
2930 * +--------------------------+
2931 * fbno fend
2932 *
2933 * Needs to be trimmed to:
2934 * +-------+
2935 * fbno fend
2936 */
2937 fend = bbno;
2938 } else {
2939 /* middle overlap */
2940
2941 /*
2942 * Case 9:
2943 * bbno bend
2944 * +BBBBBBBBBBBBBBBBB+
2945 * +-----------------------------------+
2946 * fbno fend
2947 *
2948 * Can be trimmed to:
2949 * +-------+ OR +-------+
2950 * fbno fend fbno fend
2951 *
2952 * Backward allocation leads to significant
2953 * fragmentation of directories, which degrades
2954 * directory performance, therefore we always want to
2955 * choose the option that produces forward allocation
2956 * patterns.
2957 * Preferring the lower bno extent will make the next
2958 * request use "fend" as the start of the next
2959 * allocation; if the segment is no longer busy at
2960 * that point, we'll get a contiguous allocation, but
2961 * even if it is still busy, we will get a forward
2962 * allocation.
2963 * We try to avoid choosing the segment at "bend",
2964 * because that can lead to the next allocation
2965 * taking the segment at "fbno", which would be a
2966 * backward allocation. We only use the segment at
2967 * "fbno" if it is much larger than the current
2968 * requested size, because in that case there's a
2969 * good chance subsequent allocations will be
2970 * contiguous.
2971 */
2972 if (bbno - fbno >= args->maxlen) {
2973 /* left candidate fits perfect */
2974 fend = bbno;
2975 } else if (fend - bend >= args->maxlen * 4) {
2976 /* right candidate has enough free space */
2977 fbno = bend;
2978 } else if (bbno - fbno >= args->minlen) {
2979 /* left candidate fits minimum requirement */
2980 fend = bbno;
2981 } else {
2982 goto fail;
2983 }
2984 }
2985
2986 flen = fend - fbno;
2987 }
2988 spin_unlock(&args->pag->pagb_lock);
2989
2990 if (fbno != bno || flen != len) {
2991 trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len,
2992 fbno, flen);
2993 }
2994 *rbno = fbno;
2995 *rlen = flen;
2996 return;
2997 fail:
2998 /*
2999 * Return a zero extent length as a failure indication. All callers
3000 * re-check if the trimmed extent satisfies the minlen requirement.
3001 */
3002 spin_unlock(&args->pag->pagb_lock);
3003 trace_xfs_alloc_busy_trim(args->mp, args->agno, bno, len, fbno, 0);
3004 *rbno = fbno;
3005 *rlen = 0;
3006 }
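/*
 * Caller pattern (editor's sketch of how the allocators in this file
 * consume the trimmed result; not a verbatim copy of any one caller):
 *
 *	xfs_alloc_busy_trim(args, fbno, flen, &tbno, &tlen);
 *	if (tlen < args->minlen)
 *		-> not enough of the extent is unbusy; the caller forces
 *		   the log (or retries) so the busy extents get cleared
 *
 * A returned *rlen of zero is therefore handled the same way as any other
 * too-short result.
 */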
3007
3008 static void
3009 xfs_alloc_busy_clear_one(
3010 struct xfs_mount *mp,
3011 struct xfs_perag *pag,
3012 struct xfs_busy_extent *busyp)
3013 {
3014 if (busyp->length) {
3015 trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
3016 busyp->length);
3017 rb_erase(&busyp->rb_node, &pag->pagb_tree);
3018 }
3019
3020 list_del_init(&busyp->list);
3021 kmem_free(busyp);
3022 }
3023
3024 /*
3025 * Remove all extents on the passed in list from the busy extents tree.
3026 * If do_discard is set skip extents that need to be discarded, and mark
3027 * these as undergoing a discard operation instead.
3028 */
3029 void
3030 xfs_alloc_busy_clear(
3031 struct xfs_mount *mp,
3032 struct list_head *list,
3033 bool do_discard)
3034 {
3035 struct xfs_busy_extent *busyp, *n;
3036 struct xfs_perag *pag = NULL;
3037 xfs_agnumber_t agno = NULLAGNUMBER;
3038
3039 list_for_each_entry_safe(busyp, n, list, list) {
3040 if (busyp->agno != agno) {
3041 if (pag) {
3042 spin_unlock(&pag->pagb_lock);
3043 xfs_perag_put(pag);
3044 }
3045 pag = xfs_perag_get(mp, busyp->agno);
3046 spin_lock(&pag->pagb_lock);
3047 agno = busyp->agno;
3048 }
3049
3050 if (do_discard && busyp->length &&
3051 !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
3052 busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
3053 else
3054 xfs_alloc_busy_clear_one(mp, pag, busyp);
3055 }
3056
3057 if (pag) {
3058 spin_unlock(&pag->pagb_lock);
3059 xfs_perag_put(pag);
3060 }
3061 }
3062
3063 /*
3064 * Callback for list_sort to sort busy extents by the AG they reside in.
3065 */
3066 int
3067 xfs_busy_extent_ag_cmp(
3068 void *priv,
3069 struct list_head *a,
3070 struct list_head *b)
3071 {
3072 return container_of(a, struct xfs_busy_extent, list)->agno -
3073 container_of(b, struct xfs_busy_extent, list)->agno;
3074 }
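/*
 * Usage sketch (editor's note): a list of busy extents spanning several AGs
 * can be ordered so that each per-AG structure only has to be looked up
 * once, e.g.
 *
 *	list_sort(NULL, &extent_list, xfs_busy_extent_ag_cmp);
 *
 * before walking the list AG by AG (the discard path is one such user).
 */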
3075