// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_error.h"
#include "xfs_alloc.h"
#include "xfs_fsops.h"
#include "xfs_trans_space.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_trace.h"

/*
 * Write new AG headers to disk. Non-transactional, but need to be
 * written and completed prior to the growfs transaction being logged.
 * To do this, we use a delayed write buffer list and wait for
 * submission and IO completion of the list as a whole. This allows the
 * IO subsystem to merge all the AG headers in a single AG into a single
 * IO and hide most of the latency of the IO from us.
 *
 * This also means that if we get an error whilst building the buffer
 * list to write, we can cancel the entire list without having written
 * anything.
 */
static int
xfs_resizefs_init_new_ags(
	struct xfs_trans	*tp,
	struct aghdr_init_data	*id,
	xfs_agnumber_t		oagcount,
	xfs_agnumber_t		nagcount,
	xfs_rfsblock_t		delta,
	bool			*lastag_extended)
{
	struct xfs_mount	*mp = tp->t_mountp;
	xfs_rfsblock_t		nb = mp->m_sb.sb_dblocks + delta;
	int			error;

	*lastag_extended = false;

	INIT_LIST_HEAD(&id->buffer_list);
	for (id->agno = nagcount - 1;
	     id->agno >= oagcount;
	     id->agno--, delta -= id->agsize) {

		if (id->agno == nagcount - 1)
			id->agsize = nb - (id->agno *
					(xfs_rfsblock_t)mp->m_sb.sb_agblocks);
		else
			id->agsize = mp->m_sb.sb_agblocks;

		error = xfs_ag_init_headers(mp, id);
		if (error) {
			xfs_buf_delwri_cancel(&id->buffer_list);
			return error;
		}
	}

	error = xfs_buf_delwri_submit(&id->buffer_list);
	if (error)
		return error;

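	/*
	 * Any remaining delta is space being added to the original last AG,
	 * so extend it to its new size.
	 */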
	if (delta) {
		*lastag_extended = true;
		error = xfs_ag_extend_space(mp, tp, id, delta);
	}
	return error;
}

/*
 * growfs operations
 */
static int
xfs_growfs_data_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_data	*in)		/* growfs data input struct */
{
	struct xfs_buf		*bp;
	int			error;
	xfs_agnumber_t		nagcount;
	xfs_agnumber_t		nagimax = 0;
	xfs_rfsblock_t		nb, nb_div, nb_mod;
	int64_t			delta;
	bool			lastag_extended;
	xfs_agnumber_t		oagcount;
	struct xfs_trans	*tp;
	struct aghdr_init_data	id = {};

	nb = in->newblocks;
	error = xfs_sb_validate_fsb_count(&mp->m_sb, nb);
	if (error)
		return error;

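	/*
	 * When growing, read back the last sector of the proposed new size to
	 * verify that the underlying device is actually large enough.
	 */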
	if (nb > mp->m_sb.sb_dblocks) {
		error = xfs_buf_read_uncached(mp->m_ddev_targp,
				XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1),
				XFS_FSS_TO_BB(mp, 1), 0, &bp, NULL);
		if (error)
			return error;
		xfs_buf_relse(bp);
	}

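	/*
	 * Work out the new AG count. If the last AG would be smaller than
	 * XFS_MIN_AG_BLOCKS, drop it and round the new size down to a whole
	 * multiple of the AG size.
	 */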
	nb_div = nb;
	nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks);
	nagcount = nb_div + (nb_mod != 0);
	if (nb_mod && nb_mod < XFS_MIN_AG_BLOCKS) {
		nagcount--;
		nb = (xfs_rfsblock_t)nagcount * mp->m_sb.sb_agblocks;
	}
	delta = nb - mp->m_sb.sb_dblocks;
	/*
	 * Reject filesystems with a single AG because they are not
	 * supported, and reject a shrink operation that would cause a
	 * filesystem to become unsupported.
	 */
	if (delta < 0 && nagcount < 2)
		return -EINVAL;

	oagcount = mp->m_sb.sb_agcount;

	/* allocate the new per-ag structures */
	if (nagcount > oagcount) {
		error = xfs_initialize_perag(mp, nagcount, &nagimax);
		if (error)
			return error;
	} else if (nagcount < oagcount) {
		/* TODO: shrinking entire AGs (reducing the AG count) is not yet supported */
		return -EINVAL;
	}

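	/*
	 * Reserve blocks for the transaction: the standard growfs space
	 * reservation for a grow, or the number of blocks being removed for
	 * a shrink.
	 */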
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			(delta > 0 ? XFS_GROWFS_SPACE_RES(mp) : -delta), 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	if (delta > 0) {
		error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount,
						  delta, &lastag_extended);
	} else {
		xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SHRINK,
	"EXPERIMENTAL online shrink feature in use. Use at your own risk!");

		error = xfs_ag_shrink_space(mp, &tp, nagcount - 1, -delta);
	}
	if (error)
		goto out_trans_cancel;

	/*
	 * Update changed superblock fields transactionally. These are not
	 * seen by the rest of the world until the transaction commit applies
	 * them atomically to the superblock.
	 */
	if (nagcount > oagcount)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
	if (delta)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta);
	if (id.nfree)
		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree);

	/*
	 * Sync sb counters now to reflect the updated values. This is
	 * particularly important for shrink because the write verifier
	 * will fail if sb_fdblocks is ever larger than sb_dblocks.
	 */
	if (xfs_has_lazysbcount(mp))
		xfs_log_sb(tp);

	xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

	/* New allocation groups fully initialized, so update mount struct */
	if (nagimax)
		mp->m_maxagi = nagimax;
	xfs_set_low_space_thresholds(mp);
	mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

	if (delta > 0) {
		/*
		 * If we expanded the last AG, free the per-AG reservation
		 * so we can reinitialize it with the new size.
		 */
		if (lastag_extended) {
			struct xfs_perag	*pag;

			pag = xfs_perag_get(mp, id.agno);
			error = xfs_ag_resv_free(pag);
			xfs_perag_put(pag);
			if (error)
				return error;
		}
		/*
		 * Reserve AG metadata blocks. ENOSPC here does not mean there
		 * was a growfs failure, just that there still isn't space for
		 * new user data after the grow has been run.
		 */
		error = xfs_fs_reserve_ag_blocks(mp);
		if (error == -ENOSPC)
			error = 0;
	}
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

static int
xfs_growfs_log_private(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	struct xfs_growfs_log	*in)		/* growfs log input struct */
{
	xfs_extlen_t		nb;

	nb = in->newblocks;
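	/*
	 * The new log must be at least the minimum log size, and the request
	 * must actually change either the size or the internal/external
	 * placement of the log.
	 */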
	if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES))
		return -EINVAL;
	if (nb == mp->m_sb.sb_logblocks &&
	    in->isint == (mp->m_sb.sb_logstart != 0))
		return -EINVAL;
	/*
	 * Moving the log is hard, need new interfaces to sync
	 * the log first, hold off all activity while moving it.
	 * Can have shorter or longer log in the same space,
	 * or transform internal to external log or vice versa.
	 */
	return -ENOSYS;
}

static int
xfs_growfs_imaxpct(
	struct xfs_mount	*mp,
	__u32			imaxpct)
{
	struct xfs_trans	*tp;
	int			dpct;
	int			error;

	if (imaxpct > 100)
		return -EINVAL;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata,
			XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

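	/*
	 * imaxpct is logged as a delta against the current on-disk value and
	 * applied to the superblock at transaction commit.
	 */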
	dpct = imaxpct - mp->m_sb.sb_imax_pct;
	xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Protected versions of the growfs functions; they acquire and release locks
 * on the mount point. Exported through the ioctls XFS_IOC_FSGROWFSDATA,
 * XFS_IOC_FSGROWFSLOG and XFS_IOC_FSGROWFSRT.
 */
int
xfs_growfs_data(
	struct xfs_mount	*mp,
	struct xfs_growfs_data	*in)
{
	int			error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;

	/* update imaxpct separately to the physical grow of the filesystem */
	if (in->imaxpct != mp->m_sb.sb_imax_pct) {
		error = xfs_growfs_imaxpct(mp, in->imaxpct);
		if (error)
			goto out_error;
	}

	if (in->newblocks != mp->m_sb.sb_dblocks) {
		error = xfs_growfs_data_private(mp, in);
		if (error)
			goto out_error;
	}

	/* Post growfs calculations needed to reflect new state in operations */
	if (mp->m_sb.sb_imax_pct) {
		uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct;
		do_div(icount, 100);
		M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount);
	} else
		M_IGEO(mp)->maxicount = 0;

	/* Update secondary superblocks now the physical grow has completed */
	error = xfs_update_secondary_sbs(mp);

out_error:
	/*
	 * Increment the generation unconditionally; the error could be from
	 * updating the secondary superblocks, in which case the new size is
	 * already live.
	 */
	mp->m_generation++;
	mutex_unlock(&mp->m_growlock);
	return error;
}

int
xfs_growfs_log(
	xfs_mount_t		*mp,
	struct xfs_growfs_log	*in)
{
	int			error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!mutex_trylock(&mp->m_growlock))
		return -EWOULDBLOCK;
	error = xfs_growfs_log_private(mp, in);
	mutex_unlock(&mp->m_growlock);
	return error;
}

/*
 * exported through ioctl XFS_IOC_FSCOUNTS
 */

void
xfs_fs_counts(
	xfs_mount_t		*mp,
	xfs_fsop_counts_t	*cnt)
{
	cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
	cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
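	/* Exclude blocks that are set aside and unavailable for allocation. */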
	cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
						xfs_fdblocks_unavailable(mp);
	cnt->freertx = percpu_counter_read_positive(&mp->m_frextents);
}

/*
 * exported through ioctl XFS_IOC_SET_RESBLKS & XFS_IOC_GET_RESBLKS
 *
 * xfs_reserve_blocks is called to set m_resblks
 * in the in-core mount table. The number of unused reserved blocks
 * is kept in m_resblks_avail.
 *
 * Reserve the requested number of blocks if available. Otherwise return
 * as many as possible to satisfy the request. The actual number
 * reserved is returned in outval.
 *
 * A null inval pointer indicates that only the current reserved blocks
 * available should be returned; no settings are changed.
 */

int
xfs_reserve_blocks(
	xfs_mount_t		*mp,
	uint64_t		*inval,
	xfs_fsop_resblks_t	*outval)
{
	int64_t			lcounter, delta;
	int64_t			fdblks_delta = 0;
	uint64_t		request;
	int64_t			free;
	int			error = 0;

	/* If inval is null, report current values and return */
	if (inval == (uint64_t *)NULL) {
		if (!outval)
			return -EINVAL;
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
		return 0;
	}

	request = *inval;

	/*
	 * With per-cpu counters, this becomes an interesting problem. We need
	 * to work out if we are freeing or allocating blocks first, then we
	 * can do the modification as necessary.
	 *
	 * We do this under the m_sb_lock so that if we are near ENOSPC, we will
	 * hold out any changes while we work out what to do. This means that
	 * the amount of free space can change while we do this, so we need to
	 * retry if we end up trying to reserve more space than is available.
	 */
	spin_lock(&mp->m_sb_lock);

	/*
	 * If our previous reservation was larger than the current value,
	 * then move any unused blocks back to the free pool. Modify the resblks
	 * counters directly since we shouldn't have any problems unreserving
	 * space.
	 */
	if (mp->m_resblks > request) {
		lcounter = mp->m_resblks_avail - request;
		if (lcounter > 0) {		/* release unused blocks */
			fdblks_delta = lcounter;
			mp->m_resblks_avail -= lcounter;
		}
		mp->m_resblks = request;
		if (fdblks_delta) {
			spin_unlock(&mp->m_sb_lock);
			error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
			spin_lock(&mp->m_sb_lock);
		}

		goto out;
	}

	/*
	 * If the request is larger than the current reservation, reserve the
	 * blocks before we update the reserve counters. Sample m_fdblocks and
	 * perform a partial reservation if the request exceeds free space.
	 *
	 * The code below estimates how many blocks it can request from
	 * fdblocks to stash in the reserve pool.  This is a classic TOCTOU
	 * race since fdblocks updates are not always coordinated via
	 * m_sb_lock.  Set the reserve size even if there's not enough free
	 * space to fill it because mod_fdblocks will refill an undersized
	 * reserve when it can.
	 */
	free = percpu_counter_sum(&mp->m_fdblocks) -
						xfs_fdblocks_unavailable(mp);
	delta = request - mp->m_resblks;
	mp->m_resblks = request;
	if (delta > 0 && free > 0) {
		/*
		 * We'll either succeed in getting space from the free block
		 * count or we'll get an ENOSPC.  Don't set the reserved flag
		 * here - we don't want to reserve the extra reserve blocks
		 * from the reserve.
		 *
		 * The desired reserve size can change after we drop the lock.
		 * Use mod_fdblocks to put the space into the reserve or into
		 * fdblocks as appropriate.
		 */
		fdblks_delta = min(free, delta);
		spin_unlock(&mp->m_sb_lock);
		error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
		if (!error)
			xfs_mod_fdblocks(mp, fdblks_delta, 0);
		spin_lock(&mp->m_sb_lock);
	}
out:
	if (outval) {
		outval->resblks = mp->m_resblks;
		outval->resblks_avail = mp->m_resblks_avail;
	}

	spin_unlock(&mp->m_sb_lock);
	return error;
}

int
xfs_fs_goingdown(
	xfs_mount_t		*mp,
	uint32_t		inflags)
{
	switch (inflags) {
	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
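		/*
		 * Freeze the block device to sync and quiesce the filesystem,
		 * force the shutdown while it is frozen, then thaw it again.
		 * If the freeze fails, skip the shutdown.
		 */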
		if (!freeze_bdev(mp->m_super->s_bdev)) {
			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
			thaw_bdev(mp->m_super->s_bdev);
		}
		break;
	}
	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
		xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
		break;
	case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH:
		xfs_force_shutdown(mp,
				SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Force a shutdown of the filesystem instantly while keeping the filesystem
 * consistent. We don't do an unmount here; we just shut down the shop and make
 * sure that absolutely nothing persistent happens to this filesystem after
 * this point.
 *
 * The shutdown state change is atomic, so only the first shutdown call
 * processes the shutdown. This means we only shut down the log once, as it
 * requires, and we don't spam the logs when multiple concurrent shutdowns
 * race to set the shutdown flags.
 */
void
xfs_do_force_shutdown(
	struct xfs_mount *mp,
	uint32_t	flags,
	char		*fname,
	int		lnnum)
{
	int		tag;
	const char	*why;

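	/*
	 * If a shutdown is already in progress, all we can do here is wait
	 * for the log to finish shutting down before returning.
	 */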
	if (test_and_set_bit(XFS_OPSTATE_SHUTDOWN, &mp->m_opstate)) {
		xlog_shutdown_wait(mp->m_log);
		return;
	}
	if (mp->m_sb_bp)
		mp->m_sb_bp->b_flags |= XBF_DONE;

	if (flags & SHUTDOWN_FORCE_UMOUNT)
		xfs_alert(mp, "User initiated shutdown received.");

	if (xlog_force_shutdown(mp->m_log, flags)) {
		tag = XFS_PTAG_SHUTDOWN_LOGERROR;
		why = "Log I/O Error";
	} else if (flags & SHUTDOWN_CORRUPT_INCORE) {
		tag = XFS_PTAG_SHUTDOWN_CORRUPT;
		why = "Corruption of in-memory data";
	} else {
		tag = XFS_PTAG_SHUTDOWN_IOERROR;
		why = "Metadata I/O Error";
	}

	trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum);

	xfs_alert_tag(mp, tag,
"%s (0x%x) detected at %pS (%s:%d). Shutting down filesystem.",
		why, flags, __return_address, fname, lnnum);
	xfs_alert(mp,
		"Please unmount the filesystem and rectify the problem(s)");
	if (xfs_error_level >= XFS_ERRLEVEL_HIGH)
		xfs_stack_trace();
}

/*
 * Reserve free space for per-AG metadata.
 */
int
xfs_fs_reserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	mp->m_finobt_nores = false;
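	/*
	 * Initialise the reservation for every AG. Record the first error but
	 * keep going so the remaining AGs still get their reservations.
	 */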
	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_init(pag, NULL);
		if (err2 && !error)
			error = err2;
	}

	if (error && error != -ENOSPC) {
		xfs_warn(mp,
	"Error %d reserving per-AG metadata reserve pool.", error);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	}

	return error;
}

/*
 * Free space reserved for per-AG metadata.
 */
int
xfs_fs_unreserve_ag_blocks(
	struct xfs_mount	*mp)
{
	xfs_agnumber_t		agno;
	struct xfs_perag	*pag;
	int			error = 0;
	int			err2;

	for_each_perag(mp, agno, pag) {
		err2 = xfs_ag_resv_free(pag);
		if (err2 && !error)
			error = err2;
	}

	if (error)
		xfs_warn(mp,
	"Error %d freeing per-AG metadata reserve pool.", error);

	return error;
}