/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_bit.h"
#include "xfs_rw.h"
#include "xfs_quota.h"
#include "xfs_fsops.h"

STATIC void	xfs_mount_log_sbunit(xfs_mount_t *, __int64_t);
STATIC int	xfs_uuid_mount(xfs_mount_t *);
STATIC void	xfs_uuid_unmount(xfs_mount_t *mp);

void xfs_xlatesb(void *, xfs_sb_t *, int, xfs_arch_t, __int64_t);

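/*
 * Table mapping each superblock field to its byte offset in xfs_sb_t.
 * xfs_xlatesb() derives a field's size from the difference between
 * consecutive offsets, so the entries must stay in the same order as the
 * fields of xfs_sb_t, and the final sizeof(xfs_sb_t) entry acts as a
 * sentinel. Fields of type 1 (the UUID and the filesystem name) are
 * copied without endian translation.
 */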
static struct {
	short offset;
	short type;	/* 0 = integer
			 * 1 = binary / string (no translation)
			 */
} xfs_sb_info[] = {
	{ offsetof(xfs_sb_t, sb_magicnum), 0 },
	{ offsetof(xfs_sb_t, sb_blocksize), 0 },
	{ offsetof(xfs_sb_t, sb_dblocks), 0 },
	{ offsetof(xfs_sb_t, sb_rblocks), 0 },
	{ offsetof(xfs_sb_t, sb_rextents), 0 },
	{ offsetof(xfs_sb_t, sb_uuid), 1 },
	{ offsetof(xfs_sb_t, sb_logstart), 0 },
	{ offsetof(xfs_sb_t, sb_rootino), 0 },
	{ offsetof(xfs_sb_t, sb_rbmino), 0 },
	{ offsetof(xfs_sb_t, sb_rsumino), 0 },
	{ offsetof(xfs_sb_t, sb_rextsize), 0 },
	{ offsetof(xfs_sb_t, sb_agblocks), 0 },
	{ offsetof(xfs_sb_t, sb_agcount), 0 },
	{ offsetof(xfs_sb_t, sb_rbmblocks), 0 },
	{ offsetof(xfs_sb_t, sb_logblocks), 0 },
	{ offsetof(xfs_sb_t, sb_versionnum), 0 },
	{ offsetof(xfs_sb_t, sb_sectsize), 0 },
	{ offsetof(xfs_sb_t, sb_inodesize), 0 },
	{ offsetof(xfs_sb_t, sb_inopblock), 0 },
	{ offsetof(xfs_sb_t, sb_fname[0]), 1 },
	{ offsetof(xfs_sb_t, sb_blocklog), 0 },
	{ offsetof(xfs_sb_t, sb_sectlog), 0 },
	{ offsetof(xfs_sb_t, sb_inodelog), 0 },
	{ offsetof(xfs_sb_t, sb_inopblog), 0 },
	{ offsetof(xfs_sb_t, sb_agblklog), 0 },
	{ offsetof(xfs_sb_t, sb_rextslog), 0 },
	{ offsetof(xfs_sb_t, sb_inprogress), 0 },
	{ offsetof(xfs_sb_t, sb_imax_pct), 0 },
	{ offsetof(xfs_sb_t, sb_icount), 0 },
	{ offsetof(xfs_sb_t, sb_ifree), 0 },
	{ offsetof(xfs_sb_t, sb_fdblocks), 0 },
	{ offsetof(xfs_sb_t, sb_frextents), 0 },
	{ offsetof(xfs_sb_t, sb_uquotino), 0 },
	{ offsetof(xfs_sb_t, sb_gquotino), 0 },
	{ offsetof(xfs_sb_t, sb_qflags), 0 },
	{ offsetof(xfs_sb_t, sb_flags), 0 },
	{ offsetof(xfs_sb_t, sb_shared_vn), 0 },
	{ offsetof(xfs_sb_t, sb_inoalignmt), 0 },
	{ offsetof(xfs_sb_t, sb_unit), 0 },
	{ offsetof(xfs_sb_t, sb_width), 0 },
	{ offsetof(xfs_sb_t, sb_dirblklog), 0 },
	{ offsetof(xfs_sb_t, sb_logsectlog), 0 },
	{ offsetof(xfs_sb_t, sb_logsectsize), 0 },
	{ offsetof(xfs_sb_t, sb_logsunit), 0 },
	{ offsetof(xfs_sb_t, sb_features2), 0 },
	{ sizeof(xfs_sb_t), 0 }
};

/*
 * Return a pointer to an initialized xfs_mount structure.
 */
xfs_mount_t *
xfs_mount_init(void)
{
	xfs_mount_t	*mp;

	mp = kmem_zalloc(sizeof(*mp), KM_SLEEP);

	AIL_LOCKINIT(&mp->m_ail_lock, "xfs_ail");
	spinlock_init(&mp->m_sb_lock, "xfs_sb");
	mutex_init(&mp->m_ilock, MUTEX_DEFAULT, "xfs_ilock");
	initnsema(&mp->m_growlock, 1, "xfs_grow");
	/*
	 * Initialize the AIL.
	 */
	xfs_trans_ail_init(mp);

	atomic_set(&mp->m_active_trans, 0);

	return mp;
}

/*
 * Free up the resources associated with a mount structure. Assume that
 * the structure was initially zeroed, so we can tell which fields got
 * initialized.
 */
void
xfs_mount_free(
	xfs_mount_t	*mp,
	int		remove_bhv)
{
	if (mp->m_ihash)
		xfs_ihash_free(mp);
	if (mp->m_chash)
		xfs_chash_free(mp);

	if (mp->m_perag) {
		int	agno;

		for (agno = 0; agno < mp->m_maxagi; agno++)
			if (mp->m_perag[agno].pagb_list)
				kmem_free(mp->m_perag[agno].pagb_list,
					  sizeof(xfs_perag_busy_t) *
					  XFS_PAGB_NUM_SLOTS);
		kmem_free(mp->m_perag,
			  sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
	}

	AIL_LOCK_DESTROY(&mp->m_ail_lock);
	spinlock_destroy(&mp->m_sb_lock);
	mutex_destroy(&mp->m_ilock);
	freesema(&mp->m_growlock);
	if (mp->m_quotainfo)
		XFS_QM_DONE(mp);

	if (mp->m_fsname != NULL)
		kmem_free(mp->m_fsname, mp->m_fsname_len);

	if (remove_bhv) {
		struct vfs	*vfsp = XFS_MTOVFS(mp);

		bhv_remove_all_vfsops(vfsp, 0);
		VFS_REMOVEBHV(vfsp, &mp->m_bhv);
	}

	kmem_free(mp, sizeof(xfs_mount_t));
}


/*
 * Check the validity of the SB found.
 */
STATIC int
xfs_mount_validate_sb(
	xfs_mount_t	*mp,
	xfs_sb_t	*sbp)
{
	/*
	 * If the log device and data device have the
	 * same device number, the log is internal.
	 * Consequently, the sb_logstart should be non-zero. If
	 * we have a zero sb_logstart in this case, we may be trying to mount
	 * a volume filesystem in a non-volume manner.
	 */
	if (sbp->sb_magicnum != XFS_SB_MAGIC) {
		cmn_err(CE_WARN, "XFS: bad magic number");
		return XFS_ERROR(EWRONGFS);
	}

	if (!XFS_SB_GOOD_VERSION(sbp)) {
		cmn_err(CE_WARN, "XFS: bad version");
		return XFS_ERROR(EWRONGFS);
	}

	if (unlikely(
	    sbp->sb_logstart == 0 && mp->m_logdev_targp == mp->m_ddev_targp)) {
		cmn_err(CE_WARN,
			"XFS: filesystem is marked as having an external log; "
			"specify logdev on the\nmount command line.");
		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(1)",
				     XFS_ERRLEVEL_HIGH, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	if (unlikely(
	    sbp->sb_logstart != 0 && mp->m_logdev_targp != mp->m_ddev_targp)) {
		cmn_err(CE_WARN,
			"XFS: filesystem is marked as having an internal log; "
			"don't specify logdev on\nthe mount command line.");
		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(2)",
				     XFS_ERRLEVEL_HIGH, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * More sanity checking. These were stolen directly from
	 * xfs_repair.
	 */
	if (unlikely(
	    sbp->sb_agcount <= 0 ||
	    sbp->sb_sectsize < XFS_MIN_SECTORSIZE ||
	    sbp->sb_sectsize > XFS_MAX_SECTORSIZE ||
	    sbp->sb_sectlog < XFS_MIN_SECTORSIZE_LOG ||
	    sbp->sb_sectlog > XFS_MAX_SECTORSIZE_LOG ||
	    sbp->sb_blocksize < XFS_MIN_BLOCKSIZE ||
	    sbp->sb_blocksize > XFS_MAX_BLOCKSIZE ||
	    sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
	    sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
	    sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
	    sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
	    (sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
	    (sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
	    sbp->sb_imax_pct > 100)) {
		cmn_err(CE_WARN, "XFS: SB sanity check 1 failed");
		XFS_CORRUPTION_ERROR("xfs_mount_validate_sb(3)",
				     XFS_ERRLEVEL_LOW, mp, sbp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Sanity check AG count, size fields against data size field
	 */
	if (unlikely(
	    sbp->sb_dblocks == 0 ||
	    sbp->sb_dblocks >
	     (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
	    sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
			      sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
		cmn_err(CE_WARN, "XFS: SB sanity check 2 failed");
		XFS_ERROR_REPORT("xfs_mount_validate_sb(4)",
				 XFS_ERRLEVEL_LOW, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	ASSERT(PAGE_SHIFT >= sbp->sb_blocklog);
	ASSERT(sbp->sb_blocklog >= BBSHIFT);

#if XFS_BIG_BLKNOS	/* Limited by ULONG_MAX of page cache index */
	if (unlikely(
	    (sbp->sb_dblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX ||
	    (sbp->sb_rblocks >> (PAGE_SHIFT - sbp->sb_blocklog)) > ULONG_MAX)) {
#else			/* Limited by UINT_MAX of sectors */
	if (unlikely(
	    (sbp->sb_dblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX ||
	    (sbp->sb_rblocks << (sbp->sb_blocklog - BBSHIFT)) > UINT_MAX)) {
#endif
		cmn_err(CE_WARN,
	"XFS: File system is too large to be mounted on this system.");
		return XFS_ERROR(E2BIG);
	}

	if (unlikely(sbp->sb_inprogress)) {
		cmn_err(CE_WARN, "XFS: file system busy");
		XFS_ERROR_REPORT("xfs_mount_validate_sb(5)",
				 XFS_ERRLEVEL_LOW, mp);
		return XFS_ERROR(EFSCORRUPTED);
	}

	/*
	 * Until this is fixed only page-sized or smaller data blocks work.
	 */
	if (unlikely(sbp->sb_blocksize > PAGE_SIZE)) {
		cmn_err(CE_WARN,
	"XFS: Attempted to mount file system with blocksize %d bytes",
			sbp->sb_blocksize);
		cmn_err(CE_WARN,
	"XFS: Only page-sized (%d) or less blocksizes currently work.",
			PAGE_SIZE);
		return XFS_ERROR(ENOSYS);
	}

	return 0;
}

xfs_agnumber_t
xfs_initialize_perag(xfs_mount_t *mp, xfs_agnumber_t agcount)
{
	xfs_agnumber_t	index, max_metadata;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	xfs_ino_t	ino;
	xfs_sb_t	*sbp = &mp->m_sb;
	xfs_ino_t	max_inum = XFS_MAXINUMBER_32;

	/* Check to see if the filesystem can overflow 32 bit inodes */
	agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
	ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

	/* Set the 32-bit inode flag only if the option was requested and
	 * an inode number on this filesystem can actually overflow 32
	 * bits; otherwise clear it.
	 */
	if ((mp->m_flags & XFS_MOUNT_32BITINOOPT) && ino > max_inum) {
		mp->m_flags |= XFS_MOUNT_32BITINODES;
	} else {
		mp->m_flags &= ~XFS_MOUNT_32BITINODES;
	}

	/* If we can overflow then setup the ag headers accordingly */
	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		/* Calculate how much should be reserved for inodes to
		 * meet the max inode percentage.
		 */
		if (mp->m_maxicount) {
			__uint64_t icount;

			icount = sbp->sb_dblocks * sbp->sb_imax_pct;
			do_div(icount, 100);
			icount += sbp->sb_agblocks - 1;
			do_div(icount, mp->m_ialloc_blks);
			max_metadata = icount;
		} else {
			max_metadata = agcount;
		}
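		/*
		 * Walk the AGs in order, marking each one eligible for
		 * inode allocation until we reach the first AG whose
		 * inode numbers would exceed max_inum; the first
		 * max_metadata of those AGs are additionally flagged
		 * via pagf_metadata.
		 */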
		for (index = 0; index < agcount; index++) {
			ino = XFS_AGINO_TO_INO(mp, index, agino);
			if (ino > max_inum) {
				index++;
				break;
			}

			/* This AG is preferred for inodes */
			pag = &mp->m_perag[index];
			pag->pagi_inodeok = 1;
			if (index < max_metadata)
				pag->pagf_metadata = 1;
		}
	} else {
		/* Set up default behavior for smaller filesystems */
		for (index = 0; index < agcount; index++) {
			pag = &mp->m_perag[index];
			pag->pagi_inodeok = 1;
		}
	}
	return index;
}

/*
 * xfs_xlatesb
 *
 *	data	- on disk version of sb
 *	sb	- a superblock
 *	dir	- conversion direction: <0 - convert sb to buf
 *				        >0 - convert buf to sb
 *	arch	- architecture to read/write from/to buf
 *	fields	- which fields to copy (bitmask)
 */
void
xfs_xlatesb(
	void		*data,
	xfs_sb_t	*sb,
	int		dir,
	xfs_arch_t	arch,
	__int64_t	fields)
{
	xfs_caddr_t	buf_ptr;
	xfs_caddr_t	mem_ptr;
	xfs_sb_field_t	f;
	int		first;
	int		size;

	ASSERT(dir);
	ASSERT(fields);

	if (!fields)
		return;

	buf_ptr = (xfs_caddr_t)data;
	mem_ptr = (xfs_caddr_t)sb;

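	/*
	 * Process one set bit of the field mask per pass: look up the
	 * field's offset in xfs_sb_info, derive its size from the next
	 * entry's offset, then either copy the bytes verbatim (type 1
	 * fields, single bytes, or no conversion requested) or byte-swap
	 * them with INT_XLATE in the direction given by "dir".
	 */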
	while (fields) {
		f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
		first = xfs_sb_info[f].offset;
		size = xfs_sb_info[f + 1].offset - first;

		ASSERT(xfs_sb_info[f].type == 0 || xfs_sb_info[f].type == 1);

		if (arch == ARCH_NOCONVERT ||
		    size == 1 ||
		    xfs_sb_info[f].type == 1) {
			if (dir > 0) {
				memcpy(mem_ptr + first, buf_ptr + first, size);
			} else {
				memcpy(buf_ptr + first, mem_ptr + first, size);
			}
		} else {
			switch (size) {
			case 2:
				INT_XLATE(*(__uint16_t*)(buf_ptr+first),
					  *(__uint16_t*)(mem_ptr+first),
					  dir, arch);
				break;
			case 4:
				INT_XLATE(*(__uint32_t*)(buf_ptr+first),
					  *(__uint32_t*)(mem_ptr+first),
					  dir, arch);
				break;
			case 8:
				INT_XLATE(*(__uint64_t*)(buf_ptr+first),
					  *(__uint64_t*)(mem_ptr+first), dir, arch);
				break;
			default:
				ASSERT(0);
			}
		}

		fields &= ~(1LL << f);
	}
}

/*
 * xfs_readsb
 *
 * Does the initial read of the superblock.
 */
int
xfs_readsb(xfs_mount_t *mp)
{
	unsigned int	sector_size;
	unsigned int	extra_flags;
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp;
	int		error;

	ASSERT(mp->m_sb_bp == NULL);
	ASSERT(mp->m_ddev_targp != NULL);

	/*
	 * Allocate a (locked) buffer to hold the superblock.
	 * This will be kept around at all times to optimize
	 * access to the superblock.
	 */
	sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
	extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;

	bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
				BTOBB(sector_size), extra_flags);
	if (!bp || XFS_BUF_ISERROR(bp)) {
		cmn_err(CE_WARN, "XFS: SB read failed");
		error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
		goto fail;
	}
	ASSERT(XFS_BUF_ISBUSY(bp));
	ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);

	/*
	 * Initialize the mount structure from the superblock.
	 * But first do some basic consistency checking.
	 */
	sbp = XFS_BUF_TO_SBP(bp);
	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), 1,
		    ARCH_CONVERT, XFS_SB_ALL_BITS);

	error = xfs_mount_validate_sb(mp, &(mp->m_sb));
	if (error) {
		cmn_err(CE_WARN, "XFS: SB validate failed");
		goto fail;
	}

	/*
	 * We must be able to do sector-sized and sector-aligned IO.
	 */
	if (sector_size > mp->m_sb.sb_sectsize) {
		cmn_err(CE_WARN,
			"XFS: device supports only %u byte sectors (not %u)",
			sector_size, mp->m_sb.sb_sectsize);
		error = ENOSYS;
		goto fail;
	}

	/*
	 * If device sector size is smaller than the superblock size,
	 * re-read the superblock so the buffer is correctly sized.
	 */
	if (sector_size < mp->m_sb.sb_sectsize) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
		sector_size = mp->m_sb.sb_sectsize;
		bp = xfs_buf_read_flags(mp->m_ddev_targp, XFS_SB_DADDR,
					BTOBB(sector_size), extra_flags);
		if (!bp || XFS_BUF_ISERROR(bp)) {
			cmn_err(CE_WARN, "XFS: SB re-read failed");
			error = bp ? XFS_BUF_GETERROR(bp) : ENOMEM;
			goto fail;
		}
		ASSERT(XFS_BUF_ISBUSY(bp));
		ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
	}

	mp->m_sb_bp = bp;
	xfs_buf_relse(bp);
	ASSERT(XFS_BUF_VALUSEMA(bp) > 0);
	return 0;

fail:
	if (bp) {
		XFS_BUF_UNMANAGE(bp);
		xfs_buf_relse(bp);
	}
	return error;
}


/*
 * xfs_mount_common
 *
 * Mount initialization code establishing various mount
 * fields from the superblock associated with the given
 * mount structure.
 */
void
xfs_mount_common(xfs_mount_t *mp, xfs_sb_t *sbp)
{
	int	i;

	mp->m_agfrotor = mp->m_agirotor = 0;
	spinlock_init(&mp->m_agirotor_lock, "m_agirotor_lock");
	mp->m_maxagi = mp->m_sb.sb_agcount;
	mp->m_blkbit_log = sbp->sb_blocklog + XFS_NBBYLOG;
	mp->m_blkbb_log = sbp->sb_blocklog - BBSHIFT;
	mp->m_sectbb_log = sbp->sb_sectlog - BBSHIFT;
	mp->m_agno_log = xfs_highbit32(sbp->sb_agcount - 1) + 1;
	mp->m_agino_log = sbp->sb_inopblog + sbp->sb_agblklog;
	mp->m_litino = sbp->sb_inodesize -
		((uint)sizeof(xfs_dinode_core_t) + (uint)sizeof(xfs_agino_t));
	mp->m_blockmask = sbp->sb_blocksize - 1;
	mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
	mp->m_blockwmask = mp->m_blockwsize - 1;
	INIT_LIST_HEAD(&mp->m_del_inodes);

	/*
	 * Setup for attributes, in case they get created.
	 * This value is for inodes getting attributes for the first time;
	 * the per-inode value is for old attribute values.
	 */
	ASSERT(sbp->sb_inodesize >= 256 && sbp->sb_inodesize <= 2048);
	switch (sbp->sb_inodesize) {
	case 256:
		mp->m_attroffset = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(2);
		break;
	case 512:
	case 1024:
	case 2048:
		mp->m_attroffset = XFS_BMDR_SPACE_CALC(12);
		break;
	default:
		ASSERT(0);
	}
	ASSERT(mp->m_attroffset < XFS_LITINO(mp));

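	/*
	 * Precompute the per-block record limits for the alloc, bmap and
	 * inode btrees; index 0 of each pair is the leaf-block limit
	 * (i == 0 selects the leaf variant of the macro) and index 1 the
	 * interior node limit.
	 */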
	for (i = 0; i < 2; i++) {
		mp->m_alloc_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_alloc, i == 0);
		mp->m_alloc_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_alloc, i == 0);
	}
	for (i = 0; i < 2; i++) {
		mp->m_bmap_dmxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_bmbt, i == 0);
		mp->m_bmap_dmnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_bmbt, i == 0);
	}
	for (i = 0; i < 2; i++) {
		mp->m_inobt_mxr[i] = XFS_BTREE_BLOCK_MAXRECS(sbp->sb_blocksize,
			xfs_inobt, i == 0);
		mp->m_inobt_mnr[i] = XFS_BTREE_BLOCK_MINRECS(sbp->sb_blocksize,
			xfs_inobt, i == 0);
	}

	mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
	mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
				     sbp->sb_inopblock);
	mp->m_ialloc_blks = mp->m_ialloc_inos >> sbp->sb_inopblog;
}
/*
 * xfs_mountfs
 *
 * This function does the following on an initial mount of a file system:
 *	- reads the superblock from disk and init the mount struct
 *	- if we're a 32-bit kernel, do a size check on the superblock
 *	  so we don't mount terabyte filesystems
 *	- init mount struct realtime fields
 *	- allocate inode hash table for fs
 *	- init directory manager
 *	- perform recovery and init the log manager
 */
int
xfs_mountfs(
	vfs_t		*vfsp,
	xfs_mount_t	*mp,
	int		mfsi_flags)
{
	xfs_buf_t	*bp;
	xfs_sb_t	*sbp = &(mp->m_sb);
	xfs_inode_t	*rip;
	vnode_t		*rvp = NULL;
	int		readio_log, writeio_log;
	xfs_daddr_t	d;
	__uint64_t	ret64;
	__int64_t	update_flags;
	uint		quotamount, quotaflags;
	int		agno;
	int		uuid_mounted = 0;
	int		error = 0;

	if (mp->m_sb_bp == NULL) {
		if ((error = xfs_readsb(mp))) {
			return (error);
		}
	}
	xfs_mount_common(mp, sbp);

	/*
	 * Check whether sb_agblocks is aligned on a stripe boundary.
	 * If it is not, turn off m_dalign: allocator alignment is
	 * applied within an AG, so the AG itself has to be aligned
	 * on a stripe boundary.
	 */
	update_flags = 0LL;
	if (mp->m_dalign && !(mfsi_flags & XFS_MFSI_SECOND)) {
		/*
		 * If stripe unit and stripe width are not multiples
		 * of the fs blocksize turn off alignment.
		 */
		if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
		    (BBTOB(mp->m_swidth) & mp->m_blockmask)) {
			if (mp->m_flags & XFS_MOUNT_RETERR) {
				cmn_err(CE_WARN,
					"XFS: alignment check 1 failed");
				error = XFS_ERROR(EINVAL);
				goto error1;
			}
			mp->m_dalign = mp->m_swidth = 0;
		} else {
			/*
			 * Convert the stripe unit and width to FSBs.
			 */
			mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
			if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					error = XFS_ERROR(EINVAL);
					goto error1;
				}
				xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d)/swidth(%d) incompatible with agsize(%d)",
					mp->m_dalign, mp->m_swidth,
					sbp->sb_agblocks);

				mp->m_dalign = 0;
				mp->m_swidth = 0;
			} else if (mp->m_dalign) {
				mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
			} else {
				if (mp->m_flags & XFS_MOUNT_RETERR) {
					xfs_fs_cmn_err(CE_WARN, mp,
"stripe alignment turned off: sunit(%d) less than bsize(%d)",
						mp->m_dalign,
						mp->m_blockmask + 1);
					error = XFS_ERROR(EINVAL);
					goto error1;
				}
				mp->m_swidth = 0;
			}
		}

		/*
		 * Update superblock with new values
		 * and log changes
		 */
		if (XFS_SB_VERSION_HASDALIGN(sbp)) {
			if (sbp->sb_unit != mp->m_dalign) {
				sbp->sb_unit = mp->m_dalign;
				update_flags |= XFS_SB_UNIT;
			}
			if (sbp->sb_width != mp->m_swidth) {
				sbp->sb_width = mp->m_swidth;
				update_flags |= XFS_SB_WIDTH;
			}
		}
	} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
		   XFS_SB_VERSION_HASDALIGN(&mp->m_sb)) {
		mp->m_dalign = sbp->sb_unit;
		mp->m_swidth = sbp->sb_width;
	}

	xfs_alloc_compute_maxlevels(mp);
	xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
	xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
	xfs_ialloc_compute_maxlevels(mp);

	if (sbp->sb_imax_pct) {
		__uint64_t icount;

		/* Make sure the maximum inode count is a multiple of the
		 * units we allocate inodes in.
		 */

		icount = sbp->sb_dblocks * sbp->sb_imax_pct;
		do_div(icount, 100);
		do_div(icount, mp->m_ialloc_blks);
		mp->m_maxicount = (icount * mp->m_ialloc_blks) <<
				   sbp->sb_inopblog;
	} else
		mp->m_maxicount = 0;

	mp->m_maxioffset = xfs_max_file_offset(sbp->sb_blocklog);

	/*
	 * XFS uses the uuid from the superblock as the unique
	 * identifier for fsid. We cannot use the uuid from the volume
	 * since a single partition filesystem is identical to a single
	 * partition volume/filesystem.
	 */
	if ((mfsi_flags & XFS_MFSI_SECOND) == 0 &&
	    (mp->m_flags & XFS_MOUNT_NOUUID) == 0) {
		if (xfs_uuid_mount(mp)) {
			error = XFS_ERROR(EINVAL);
			goto error1;
		}
		uuid_mounted = 1;
		ret64 = uuid_hash64(&sbp->sb_uuid);
		memcpy(&vfsp->vfs_fsid, &ret64, sizeof(ret64));
	}

	/*
	 * Set the default minimum read and write sizes unless
	 * already specified in a mount option.
	 * We use smaller I/O sizes when the file system
	 * is being used for NFS service (wsync mount option).
	 */
	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
		if (mp->m_flags & XFS_MOUNT_WSYNC) {
			readio_log = XFS_WSYNC_READIO_LOG;
			writeio_log = XFS_WSYNC_WRITEIO_LOG;
		} else {
			readio_log = XFS_READIO_LOG_LARGE;
			writeio_log = XFS_WRITEIO_LOG_LARGE;
		}
	} else {
		readio_log = mp->m_readio_log;
		writeio_log = mp->m_writeio_log;
	}

	/*
	 * Set the number of readahead buffers to use based on
	 * physical memory size.
	 */
	if (xfs_physmem <= 4096)		/* <= 16MB */
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_16MB;
	else if (xfs_physmem <= 8192)		/* <= 32MB */
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_32MB;
	else
		mp->m_nreadaheads = XFS_RW_NREADAHEAD_K32;
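
	/*
	 * Clamp the preferred read/write size logs to the filesystem
	 * block size so the derived m_readio_blocks/m_writeio_blocks
	 * are always a whole (power-of-two) number of blocks.
	 */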
	if (sbp->sb_blocklog > readio_log) {
		mp->m_readio_log = sbp->sb_blocklog;
	} else {
		mp->m_readio_log = readio_log;
	}
	mp->m_readio_blocks = 1 << (mp->m_readio_log - sbp->sb_blocklog);
	if (sbp->sb_blocklog > writeio_log) {
		mp->m_writeio_log = sbp->sb_blocklog;
	} else {
		mp->m_writeio_log = writeio_log;
	}
	mp->m_writeio_blocks = 1 << (mp->m_writeio_log - sbp->sb_blocklog);

	/*
	 * Set the inode cluster size based on the physical memory
	 * size. This may still be overridden by the file system
	 * block size if it is larger than the chosen cluster size.
	 */
	if (xfs_physmem <= btoc(32 * 1024 * 1024)) { /* <= 32 MB */
		mp->m_inode_cluster_size = XFS_INODE_SMALL_CLUSTER_SIZE;
	} else {
		mp->m_inode_cluster_size = XFS_INODE_BIG_CLUSTER_SIZE;
	}
	/*
	 * Set whether we're using inode alignment.
	 */
	if (XFS_SB_VERSION_HASALIGN(&mp->m_sb) &&
	    mp->m_sb.sb_inoalignmt >=
	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
	else
		mp->m_inoalign_mask = 0;
	/*
	 * If we are using stripe alignment, check whether
	 * the stripe unit is a multiple of the inode alignment.
	 */
	if (mp->m_dalign && mp->m_inoalign_mask &&
	    !(mp->m_dalign & mp->m_inoalign_mask))
		mp->m_sinoalign = mp->m_dalign;
	else
		mp->m_sinoalign = 0;
	/*
	 * Check that the data (and log if separate) are an ok size.
	 */
	d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_dblocks) {
		cmn_err(CE_WARN, "XFS: size check 1 failed");
		error = XFS_ERROR(E2BIG);
		goto error1;
	}
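	/*
	 * Probe the data device by reading its last sector; a read
	 * failure here (ENOSPC is mapped to E2BIG) means the device is
	 * smaller than the superblock claims.
	 */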
	error = xfs_read_buf(mp, mp->m_ddev_targp,
			     d - XFS_FSS_TO_BB(mp, 1),
			     XFS_FSS_TO_BB(mp, 1), 0, &bp);
	if (!error) {
		xfs_buf_relse(bp);
	} else {
		cmn_err(CE_WARN, "XFS: size check 2 failed");
		if (error == ENOSPC) {
			error = XFS_ERROR(E2BIG);
		}
		goto error1;
	}

	if (((mfsi_flags & XFS_MFSI_CLIENT) == 0) &&
	    mp->m_logdev_targp != mp->m_ddev_targp) {
		d = (xfs_daddr_t)XFS_FSB_TO_BB(mp, mp->m_sb.sb_logblocks);
		if (XFS_BB_TO_FSB(mp, d) != mp->m_sb.sb_logblocks) {
			cmn_err(CE_WARN, "XFS: size check 3 failed");
			error = XFS_ERROR(E2BIG);
			goto error1;
		}
		error = xfs_read_buf(mp, mp->m_logdev_targp,
				     d - XFS_FSB_TO_BB(mp, 1),
				     XFS_FSB_TO_BB(mp, 1), 0, &bp);
		if (!error) {
			xfs_buf_relse(bp);
		} else {
			cmn_err(CE_WARN, "XFS: size check 3 failed");
			if (error == ENOSPC) {
				error = XFS_ERROR(E2BIG);
			}
			goto error1;
		}
	}

	/*
	 * Initialize realtime fields in the mount structure
	 */
	if ((error = xfs_rtmount_init(mp))) {
		cmn_err(CE_WARN, "XFS: RT mount failed");
		goto error1;
	}

	/*
	 * For the client case we are done now.
	 */
	if (mfsi_flags & XFS_MFSI_CLIENT) {
		return(0);
	}

	/*
	 * Copies the low order bits of the timestamp and the randomly
	 * set "sequence" number out of a UUID.
	 */
	uuid_getnodeuniq(&sbp->sb_uuid, mp->m_fixedfsid);

	/*
	 * The vfs structure needs to have a file system independent
	 * way of checking for the invariant file system ID. Since it
	 * can't look at mount structures it has a pointer to the data
	 * in the mount structure.
	 *
	 * File systems that don't support user level file handles (i.e.
	 * all of them except for XFS) will leave vfs_altfsid as NULL.
	 */
	vfsp->vfs_altfsid = (xfs_fsid_t *)mp->m_fixedfsid;
	mp->m_dmevmask = 0;	/* not persistent; set after each mount */

	/*
	 * Select the right directory manager.
	 */
	mp->m_dirops =
		XFS_SB_VERSION_HASDIRV2(&mp->m_sb) ?
			xfsv2_dirops :
			xfsv1_dirops;

	/*
	 * Initialize directory manager's entries.
	 */
	XFS_DIR_MOUNT(mp);

	/*
	 * Initialize the attribute manager's entries.
	 */
	mp->m_attr_magicpct = (mp->m_sb.sb_blocksize * 37) / 100;

	/*
	 * Initialize the precomputed transaction reservations values.
	 */
	xfs_trans_init(mp);

	/*
	 * Allocate and initialize the inode hash table for this
	 * file system.
	 */
	xfs_ihash_init(mp);
	xfs_chash_init(mp);

	/*
	 * Allocate and initialize the per-ag data.
	 */
	init_rwsem(&mp->m_peraglock);
	mp->m_perag =
		kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t), KM_SLEEP);

	mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);

	/*
	 * Log's mount-time initialization. Perform the first part of
	 * recovery if needed.
	 */
	if (likely(sbp->sb_logblocks > 0)) {	/* check for volume case */
		error = xfs_log_mount(mp, mp->m_logdev_targp,
				      XFS_FSB_TO_DADDR(mp, sbp->sb_logstart),
				      XFS_FSB_TO_BB(mp, sbp->sb_logblocks));
		if (error) {
			cmn_err(CE_WARN, "XFS: log mount failed");
			goto error2;
		}
	} else {	/* No log has been defined */
		cmn_err(CE_WARN, "XFS: no log defined");
		XFS_ERROR_REPORT("xfs_mountfs_int(1)", XFS_ERRLEVEL_LOW, mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto error2;
	}

	/*
	 * Get and sanity-check the root inode.
	 * Save the pointer to it in the mount structure.
	 */
	error = xfs_iget(mp, NULL, sbp->sb_rootino, 0, XFS_ILOCK_EXCL, &rip, 0);
	if (error) {
		cmn_err(CE_WARN, "XFS: failed to read root inode");
		goto error3;
	}

	ASSERT(rip != NULL);
	rvp = XFS_ITOV(rip);

	if (unlikely((rip->i_d.di_mode & S_IFMT) != S_IFDIR)) {
		cmn_err(CE_WARN, "XFS: corrupted root inode");
		prdev("Root inode %llu is not a directory",
		      mp->m_ddev_targp, (unsigned long long)rip->i_ino);
		xfs_iunlock(rip, XFS_ILOCK_EXCL);
		XFS_ERROR_REPORT("xfs_mountfs_int(2)", XFS_ERRLEVEL_LOW,
				 mp);
		error = XFS_ERROR(EFSCORRUPTED);
		goto error4;
	}
	mp->m_rootip = rip;	/* save it */

	xfs_iunlock(rip, XFS_ILOCK_EXCL);

	/*
	 * Initialize realtime inode pointers in the mount structure
	 */
	if ((error = xfs_rtmount_inodes(mp))) {
		/*
		 * Free up the root inode.
		 */
		cmn_err(CE_WARN, "XFS: failed to read RT inodes");
		goto error4;
	}

	/*
	 * If fs is not mounted readonly, then update the superblock
	 * unit and width changes.
	 */
	if (update_flags && !(vfsp->vfs_flag & VFS_RDONLY))
		xfs_mount_log_sbunit(mp, update_flags);

	/*
	 * Initialise the XFS quota management subsystem for this mount
	 */
	if ((error = XFS_QM_INIT(mp, &quotamount, &quotaflags)))
		goto error4;

	/*
	 * Finish recovering the file system. This part needed to be
	 * delayed until after the root and real-time bitmap inodes
	 * were consistently read in.
	 */
	error = xfs_log_mount_finish(mp, mfsi_flags);
	if (error) {
		cmn_err(CE_WARN, "XFS: log mount finish failed");
		goto error4;
	}

	/*
	 * Complete the quota initialisation, post-log-replay component.
	 */
	if ((error = XFS_QM_MOUNT(mp, quotamount, quotaflags, mfsi_flags)))
		goto error4;

	return 0;

error4:
	/*
	 * Free up the root inode.
	 */
	VN_RELE(rvp);
error3:
	xfs_log_unmount_dealloc(mp);
error2:
	xfs_ihash_free(mp);
	xfs_chash_free(mp);
	for (agno = 0; agno < sbp->sb_agcount; agno++)
		if (mp->m_perag[agno].pagb_list)
			kmem_free(mp->m_perag[agno].pagb_list,
				  sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
	kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));
	mp->m_perag = NULL;
	/* FALLTHROUGH */
error1:
	if (uuid_mounted)
		xfs_uuid_unmount(mp);
	xfs_freesb(mp);
	return error;
}

/*
 * xfs_unmountfs
 *
 * This flushes out the inodes, dquots and the superblock, unmounts the
 * log and makes sure that incore structures are freed.
 */
int
xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
{
	struct vfs	*vfsp = XFS_MTOVFS(mp);
#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
	int64_t		fsid;
#endif

	xfs_iflush_all(mp, XFS_FLUSH_ALL);

	XFS_QM_DQPURGEALL(mp,
		XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);

	/*
	 * Flush out the log synchronously so that we know for sure
	 * that nothing is pinned. This is important because bflush()
	 * will skip pinned buffers.
	 */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);

	xfs_binval(mp->m_ddev_targp);
	if (mp->m_rtdev_targp) {
		xfs_binval(mp->m_rtdev_targp);
	}

	xfs_unmountfs_writesb(mp);

	xfs_unmountfs_wait(mp);			/* wait for async bufs */

	xfs_log_unmount(mp);			/* Done! No more fs ops. */

	xfs_freesb(mp);

	/*
	 * All inodes from this mount point should be freed.
	 */
	ASSERT(mp->m_inodes == NULL);

	/*
	 * We may have bufs that are in the process of getting written still.
	 * We must wait for the I/O completion of those. The sync flag here
	 * does a two-pass iteration through the bufcache.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		xfs_incore_relse(mp->m_ddev_targp, 0, 1); /* synchronous */
	}

	xfs_unmountfs_close(mp, cr);
	if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
		xfs_uuid_unmount(mp);

#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
	/*
	 * Clear all error tags on this filesystem.
	 */
	memcpy(&fsid, &vfsp->vfs_fsid, sizeof(int64_t));
	xfs_errortag_clearall_umount(fsid, mp->m_fsname, 0);
#endif
	XFS_IODONE(vfsp);
	xfs_mount_free(mp, 1);
	return 0;
}

void
xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
{
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_free_buftarg(mp->m_logdev_targp, 1);
	if (mp->m_rtdev_targp)
		xfs_free_buftarg(mp->m_rtdev_targp, 1);
	xfs_free_buftarg(mp->m_ddev_targp, 0);
}

void
xfs_unmountfs_wait(xfs_mount_t *mp)
{
	if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_wait_buftarg(mp->m_logdev_targp);
	if (mp->m_rtdev_targp)
		xfs_wait_buftarg(mp->m_rtdev_targp);
	xfs_wait_buftarg(mp->m_ddev_targp);
}

int
xfs_unmountfs_writesb(xfs_mount_t *mp)
{
	xfs_buf_t	*sbp;
	xfs_sb_t	*sb;
	int		error = 0;

	/*
	 * Skip the superblock write if the fs is read-only, or
	 * if we are doing a forced umount.
	 */
	sbp = xfs_getsb(mp, 0);
	if (!(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY ||
	      XFS_FORCED_SHUTDOWN(mp))) {
		/*
		 * Mark shared-readonly if desired.
		 */
		sb = XFS_BUF_TO_SBP(sbp);
		if (mp->m_mk_sharedro) {
			if (!(sb->sb_flags & XFS_SBF_READONLY))
				sb->sb_flags |= XFS_SBF_READONLY;
			if (!XFS_SB_VERSION_HASSHARED(sb))
				XFS_SB_VERSION_ADDSHARED(sb);
			xfs_fs_cmn_err(CE_NOTE, mp,
				"Unmounting, marking shared read-only");
		}
		XFS_BUF_UNDONE(sbp);
		XFS_BUF_UNREAD(sbp);
		XFS_BUF_UNDELAYWRITE(sbp);
		XFS_BUF_WRITE(sbp);
		XFS_BUF_UNASYNC(sbp);
		ASSERT(XFS_BUF_TARGET(sbp) == mp->m_ddev_targp);
		xfsbdstrat(mp, sbp);
		/* Never mind errors we might get here. */
		error = xfs_iowait(sbp);
		if (error)
			xfs_ioerror_alert("xfs_unmountfs_writesb",
					  mp, sbp, XFS_BUF_ADDR(sbp));
		if (error && mp->m_mk_sharedro)
			xfs_fs_cmn_err(CE_ALERT, mp, "Superblock write error detected while unmounting. Filesystem may not be marked shared readonly");
	}
	xfs_buf_relse(sbp);
	return (error);
}

/*
 * xfs_mod_sb() can be used to copy arbitrary changes to the
 * in-core superblock into the superblock buffer to be logged.
 * It does not provide the higher level of locking that is
 * needed to protect the in-core superblock from concurrent
 * access.
 */
void
xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
{
	xfs_buf_t	*bp;
	int		first;
	int		last;
	xfs_mount_t	*mp;
	xfs_sb_t	*sbp;
	xfs_sb_field_t	f;

	ASSERT(fields);
	if (!fields)
		return;
	mp = tp->t_mountp;
	bp = xfs_trans_getsb(tp, mp, 0);
	sbp = XFS_BUF_TO_SBP(bp);
	first = sizeof(xfs_sb_t);
	last = 0;

	/* translate/copy */

	xfs_xlatesb(XFS_BUF_PTR(bp), &(mp->m_sb), -1, ARCH_CONVERT, fields);

	/* find modified range */

	f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	first = xfs_sb_info[f].offset;

	f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
	ASSERT((1LL << f) & XFS_SB_MOD_BITS);
	last = xfs_sb_info[f + 1].offset - 1;

	xfs_trans_log_buf(tp, bp, first, last);
}

/*
 * xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
 * a delta to a specified field in the in-core superblock. Simply
 * switch on the field indicated and apply the delta to that field.
 * Fields are not allowed to dip below zero, so if the delta would
 * do this, do not apply it and return EINVAL.
 *
 * The SB_LOCK must be held when this routine is called.
 */
STATIC int
xfs_mod_incore_sb_unlocked(xfs_mount_t *mp, xfs_sb_field_t field,
			int delta, int rsvd)
{
	int		scounter;	/* short counter for 32 bit fields */
	long long	lcounter;	/* long counter for 64 bit fields */
	long long	res_used, rem;

	/*
	 * With the in-core superblock spin lock held, switch
	 * on the indicated field. Apply the delta to the
	 * proper field. If the fields value would dip below
	 * 0, then do not apply the delta and return EINVAL.
	 */
	switch (field) {
	case XFS_SBS_ICOUNT:
		lcounter = (long long)mp->m_sb.sb_icount;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_icount = lcounter;
		return (0);
	case XFS_SBS_IFREE:
		lcounter = (long long)mp->m_sb.sb_ifree;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_ifree = lcounter;
		return (0);
	case XFS_SBS_FDBLOCKS:

		lcounter = (long long)mp->m_sb.sb_fdblocks;
		res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);

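		/*
		 * Free blocks interact with the reserved block pool:
		 * blocks being returned first refill m_resblks_avail up
		 * to m_resblks and only the remainder goes back into
		 * sb_fdblocks; an allocation that would drive the count
		 * negative may dip into the reserved pool when rsvd is
		 * set, otherwise it fails with ENOSPC.
		 */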
		if (delta > 0) {		/* Putting blocks back */
			if (res_used > delta) {
				mp->m_resblks_avail += delta;
			} else {
				rem = delta - res_used;
				mp->m_resblks_avail = mp->m_resblks;
				lcounter += rem;
			}
		} else {			/* Taking blocks away */

			lcounter += delta;

			/*
			 * If we're out of blocks, use any available
			 * reserved blocks if we're allowed to.
			 */

			if (lcounter < 0) {
				if (rsvd) {
					lcounter = (long long)mp->m_resblks_avail + delta;
					if (lcounter < 0) {
						return (XFS_ERROR(ENOSPC));
					}
					mp->m_resblks_avail = lcounter;
					return (0);
				} else {	/* not reserved */
					return (XFS_ERROR(ENOSPC));
				}
			}
		}

		mp->m_sb.sb_fdblocks = lcounter;
		return (0);
	case XFS_SBS_FREXTENTS:
		lcounter = (long long)mp->m_sb.sb_frextents;
		lcounter += delta;
		if (lcounter < 0) {
			return (XFS_ERROR(ENOSPC));
		}
		mp->m_sb.sb_frextents = lcounter;
		return (0);
	case XFS_SBS_DBLOCKS:
		lcounter = (long long)mp->m_sb.sb_dblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_dblocks = lcounter;
		return (0);
	case XFS_SBS_AGCOUNT:
		scounter = mp->m_sb.sb_agcount;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_agcount = scounter;
		return (0);
	case XFS_SBS_IMAX_PCT:
		scounter = mp->m_sb.sb_imax_pct;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_imax_pct = scounter;
		return (0);
	case XFS_SBS_REXTSIZE:
		scounter = mp->m_sb.sb_rextsize;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_rextsize = scounter;
		return (0);
	case XFS_SBS_RBMBLOCKS:
		scounter = mp->m_sb.sb_rbmblocks;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_rbmblocks = scounter;
		return (0);
	case XFS_SBS_RBLOCKS:
		lcounter = (long long)mp->m_sb.sb_rblocks;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_rblocks = lcounter;
		return (0);
	case XFS_SBS_REXTENTS:
		lcounter = (long long)mp->m_sb.sb_rextents;
		lcounter += delta;
		if (lcounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_rextents = lcounter;
		return (0);
	case XFS_SBS_REXTSLOG:
		scounter = mp->m_sb.sb_rextslog;
		scounter += delta;
		if (scounter < 0) {
			ASSERT(0);
			return (XFS_ERROR(EINVAL));
		}
		mp->m_sb.sb_rextslog = scounter;
		return (0);
	default:
		ASSERT(0);
		return (XFS_ERROR(EINVAL));
	}
}

/*
 * xfs_mod_incore_sb() is used to change a field in the in-core
 * superblock structure by the specified delta. This modification
 * is protected by the SB_LOCK. Just use the xfs_mod_incore_sb_unlocked()
 * routine to do the work.
 */
int
xfs_mod_incore_sb(xfs_mount_t *mp, xfs_sb_field_t field, int delta, int rsvd)
{
	unsigned long	s;
	int		status;

	s = XFS_SB_LOCK(mp);
	status = xfs_mod_incore_sb_unlocked(mp, field, delta, rsvd);
	XFS_SB_UNLOCK(mp, s);
	return (status);
}

/*
 * xfs_mod_incore_sb_batch() is used to change more than one field
 * in the in-core superblock structure at a time. This modification
 * is protected by a lock internal to this module. The fields and
 * changes to those fields are specified in the array of xfs_mod_sb
 * structures passed in.
 *
 * Either all of the specified deltas will be applied or none of
 * them will. If any modified field dips below 0, then all modifications
 * will be backed out and EINVAL will be returned.
 */
int
xfs_mod_incore_sb_batch(xfs_mount_t *mp, xfs_mod_sb_t *msb, uint nmsb, int rsvd)
{
	unsigned long	s;
	int		status = 0;
	xfs_mod_sb_t	*msbp;

	/*
	 * Loop through the array of mod structures and apply each
	 * individually. If any fail, then back out all those
	 * which have already been applied. Do all of this within
	 * the scope of the SB_LOCK so that all of the changes will
	 * be atomic.
	 */
	s = XFS_SB_LOCK(mp);
	msbp = &msb[0];
	for (msbp = &msbp[0]; msbp < (msb + nmsb); msbp++) {
		/*
		 * Apply the delta at index n. If it fails, break
		 * from the loop so we'll fall into the undo loop
		 * below.
		 */
		status = xfs_mod_incore_sb_unlocked(mp, msbp->msb_field,
						    msbp->msb_delta, rsvd);
		if (status != 0) {
			break;
		}
	}

	/*
	 * If we didn't complete the loop above, then back out
	 * any changes made to the superblock. If you add code
	 * between the loop above and here, make sure that you
	 * preserve the value of status. Loop back until
	 * we step below the beginning of the array. Make sure
	 * we don't touch anything back there.
	 */
	if (status != 0) {
		msbp--;
		while (msbp >= msb) {
			status = xfs_mod_incore_sb_unlocked(mp,
					msbp->msb_field, -(msbp->msb_delta), rsvd);
			ASSERT(status == 0);
			msbp--;
		}
	}
	XFS_SB_UNLOCK(mp, s);
	return (status);
}

/*
 * xfs_getsb() is called to obtain the buffer for the superblock.
 * The buffer is returned locked and read in from disk.
 * The buffer should be released with a call to xfs_buf_relse().
 *
 * If the flags parameter is XFS_BUF_TRYLOCK, then we'll only return
 * the superblock buffer if it can be locked without sleeping.
 * If it can't then we'll return NULL.
 */
xfs_buf_t *
xfs_getsb(
	xfs_mount_t	*mp,
	int		flags)
{
	xfs_buf_t	*bp;

	ASSERT(mp->m_sb_bp != NULL);
	bp = mp->m_sb_bp;
	if (flags & XFS_BUF_TRYLOCK) {
		if (!XFS_BUF_CPSEMA(bp)) {
			return NULL;
		}
	} else {
		XFS_BUF_PSEMA(bp, PRIBIO);
	}
	XFS_BUF_HOLD(bp);
	ASSERT(XFS_BUF_ISDONE(bp));
	return (bp);
}

/*
 * Used to free the superblock along various error paths.
 */
void
xfs_freesb(
	xfs_mount_t	*mp)
{
	xfs_buf_t	*bp;

	/*
	 * Use xfs_getsb() so that the buffer will be locked
	 * when we call xfs_buf_relse().
	 */
	bp = xfs_getsb(mp, 0);
	XFS_BUF_UNMANAGE(bp);
	xfs_buf_relse(bp);
	mp->m_sb_bp = NULL;
}

/*
 * See if the UUID is unique among mounted XFS filesystems.
 * Mount fails if UUID is nil or a FS with the same UUID is already mounted.
 */
STATIC int
xfs_uuid_mount(
	xfs_mount_t	*mp)
{
	if (uuid_is_nil(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has nil UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	if (!uuid_table_insert(&mp->m_sb.sb_uuid)) {
		cmn_err(CE_WARN,
			"XFS: Filesystem %s has duplicate UUID - can't mount",
			mp->m_fsname);
		return -1;
	}
	return 0;
}

/*
 * Remove filesystem from the UUID table.
 */
STATIC void
xfs_uuid_unmount(
	xfs_mount_t	*mp)
{
	uuid_table_remove(&mp->m_sb.sb_uuid);
}

/*
 * Used to log changes to the superblock unit and width fields which could
 * be altered by the mount options. Only the first superblock is updated.
 */
STATIC void
xfs_mount_log_sbunit(
	xfs_mount_t	*mp,
	__int64_t	fields)
{
	xfs_trans_t	*tp;

	ASSERT(fields & (XFS_SB_UNIT|XFS_SB_WIDTH|XFS_SB_UUID));

	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
	if (xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
			      XFS_DEFAULT_LOG_COUNT)) {
		xfs_trans_cancel(tp, 0);
		return;
	}
	xfs_mod_sb(tp, fields);
	xfs_trans_commit(tp, 0, NULL);
}
