1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 
19 #include "xfs.h"
20 #include "xfs_bit.h"
21 #include "xfs_log.h"
22 #include "xfs_inum.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_dir2.h"
27 #include "xfs_alloc.h"
28 #include "xfs_quota.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dinode.h"
34 #include "xfs_inode.h"
35 #include "xfs_btree.h"
36 #include "xfs_btree_trace.h"
37 #include "xfs_ialloc.h"
38 #include "xfs_bmap.h"
39 #include "xfs_rtalloc.h"
40 #include "xfs_error.h"
41 #include "xfs_itable.h"
42 #include "xfs_fsops.h"
43 #include "xfs_attr.h"
44 #include "xfs_buf_item.h"
45 #include "xfs_utils.h"
46 #include "xfs_vnodeops.h"
47 #include "xfs_log_priv.h"
48 #include "xfs_trans_priv.h"
49 #include "xfs_filestream.h"
50 #include "xfs_da_btree.h"
51 #include "xfs_extfree_item.h"
52 #include "xfs_mru_cache.h"
53 #include "xfs_inode_item.h"
54 #include "xfs_sync.h"
55 #include "xfs_trace.h"
56 
57 #include <linux/namei.h>
58 #include <linux/init.h>
59 #include <linux/slab.h>
60 #include <linux/mount.h>
61 #include <linux/mempool.h>
62 #include <linux/writeback.h>
63 #include <linux/kthread.h>
64 #include <linux/freezer.h>
65 #include <linux/parser.h>
66 
67 static const struct super_operations xfs_super_operations;
68 static kmem_zone_t *xfs_ioend_zone;
69 mempool_t *xfs_ioend_pool;
70 
71 #define MNTOPT_LOGBUFS	"logbufs"	/* number of XFS log buffers */
72 #define MNTOPT_LOGBSIZE	"logbsize"	/* size of XFS log buffers */
73 #define MNTOPT_LOGDEV	"logdev"	/* log device */
74 #define MNTOPT_RTDEV	"rtdev"		/* realtime I/O device */
75 #define MNTOPT_BIOSIZE	"biosize"	/* log2 of preferred buffered io size */
76 #define MNTOPT_WSYNC	"wsync"		/* safe-mode nfs compatible mount */
77 #define MNTOPT_NOALIGN	"noalign"	/* turn off stripe alignment */
78 #define MNTOPT_SWALLOC	"swalloc"	/* turn on stripe width allocation */
79 #define MNTOPT_SUNIT	"sunit"		/* data volume stripe unit */
80 #define MNTOPT_SWIDTH	"swidth"	/* data volume stripe width */
81 #define MNTOPT_NOUUID	"nouuid"	/* ignore filesystem UUID */
82 #define MNTOPT_MTPT	"mtpt"		/* filesystem mount point */
83 #define MNTOPT_GRPID	"grpid"		/* group-ID from parent directory */
84 #define MNTOPT_NOGRPID	"nogrpid"	/* group-ID from current process */
85 #define MNTOPT_BSDGROUPS    "bsdgroups"    /* group-ID from parent directory */
86 #define MNTOPT_SYSVGROUPS   "sysvgroups"   /* group-ID from current process */
87 #define MNTOPT_ALLOCSIZE    "allocsize"    /* preferred allocation size */
88 #define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
89 #define MNTOPT_BARRIER	"barrier"	/* use write barriers for log write and
90 					 * unwritten extent conversion */
91 #define MNTOPT_NOBARRIER "nobarrier"	/* .. disable */
92 #define MNTOPT_64BITINODE   "inode64"	/* inodes can be allocated anywhere */
93 #define MNTOPT_IKEEP	"ikeep"		/* do not free empty inode clusters */
94 #define MNTOPT_NOIKEEP	"noikeep"	/* free empty inode clusters */
95 #define MNTOPT_LARGEIO	   "largeio"	/* report large I/O sizes in stat() */
96 #define MNTOPT_NOLARGEIO   "nolargeio"	/* do not report large I/O sizes
97 					 * in stat(). */
98 #define MNTOPT_ATTR2	"attr2"		/* do use attr2 attribute format */
99 #define MNTOPT_NOATTR2	"noattr2"	/* do not use attr2 attribute format */
100 #define MNTOPT_FILESTREAM  "filestreams" /* use filestreams allocator */
101 #define MNTOPT_QUOTA	"quota"		/* disk quotas (user) */
102 #define MNTOPT_NOQUOTA	"noquota"	/* no quotas */
103 #define MNTOPT_USRQUOTA	"usrquota"	/* user quota enabled */
104 #define MNTOPT_GRPQUOTA	"grpquota"	/* group quota enabled */
105 #define MNTOPT_PRJQUOTA	"prjquota"	/* project quota enabled */
106 #define MNTOPT_UQUOTA	"uquota"	/* user quota (IRIX variant) */
107 #define MNTOPT_GQUOTA	"gquota"	/* group quota (IRIX variant) */
108 #define MNTOPT_PQUOTA	"pquota"	/* project quota (IRIX variant) */
109 #define MNTOPT_UQUOTANOENF "uqnoenforce"/* user quota accounting, no limit enforcement */
110 #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota accounting, no limit enforcement */
111 #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota accounting, no limit enforcement */
112 #define MNTOPT_QUOTANOENF  "qnoenforce"	/* same as uqnoenforce */
113 #define MNTOPT_DELAYLOG   "delaylog"	/* Delayed logging enabled */
114 #define MNTOPT_NODELAYLOG "nodelaylog"	/* Delayed logging disabled */
115 
116 /*
117  * Table driven mount option parser.
118  *
119  * Currently only used for remount, but it will be used for mount
120  * in the future, too.
121  */
122 enum {
123 	Opt_barrier, Opt_nobarrier, Opt_err
124 };
125 
126 static const match_table_t tokens = {
127 	{Opt_barrier, "barrier"},
128 	{Opt_nobarrier, "nobarrier"},
129 	{Opt_err, NULL}
130 };
131 
132 
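/*
 * Parse a numeric option value that may carry a trailing K/M/G
 * (kilo/mega/giga) suffix; the suffix is stripped and applied as a
 * binary left shift on the parsed value.
 */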
133 STATIC unsigned long
134 suffix_strtoul(char *s, char **endp, unsigned int base)
135 {
136 	int	last, shift_left_factor = 0;
137 	char	*value = s;
138 
139 	last = strlen(value) - 1;
140 	if (value[last] == 'K' || value[last] == 'k') {
141 		shift_left_factor = 10;
142 		value[last] = '\0';
143 	}
144 	if (value[last] == 'M' || value[last] == 'm') {
145 		shift_left_factor = 20;
146 		value[last] = '\0';
147 	}
148 	if (value[last] == 'G' || value[last] == 'g') {
149 		shift_left_factor = 30;
150 		value[last] = '\0';
151 	}
152 
153 	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
154 }
155 
156 /*
157  * This function fills in xfs_mount_t fields based on mount args.
158  * Note: the superblock has _not_ yet been read in.
159  *
160  * Note that this function leaks the various device name allocations on
161  * failure.  The caller takes care of them.
162  */
163 STATIC int
164 xfs_parseargs(
165 	struct xfs_mount	*mp,
166 	char			*options)
167 {
168 	struct super_block	*sb = mp->m_super;
169 	char			*this_char, *value, *eov;
170 	int			dsunit = 0;
171 	int			dswidth = 0;
172 	int			iosize = 0;
173 	__uint8_t		iosizelog = 0;
174 
175 	/*
176 	 * set up the mount name first so all the errors will refer to the
177 	 * correct device.
178 	 */
179 	mp->m_fsname = kstrndup(sb->s_id, MAXNAMELEN, GFP_KERNEL);
180 	if (!mp->m_fsname)
181 		return ENOMEM;
182 	mp->m_fsname_len = strlen(mp->m_fsname) + 1;
183 
184 	/*
185 	 * Copy binary VFS mount flags we are interested in.
186 	 */
187 	if (sb->s_flags & MS_RDONLY)
188 		mp->m_flags |= XFS_MOUNT_RDONLY;
189 	if (sb->s_flags & MS_DIRSYNC)
190 		mp->m_flags |= XFS_MOUNT_DIRSYNC;
191 	if (sb->s_flags & MS_SYNCHRONOUS)
192 		mp->m_flags |= XFS_MOUNT_WSYNC;
193 
194 	/*
195 	 * Set some default flags that could be cleared by the mount option
196 	 * parsing.
197 	 */
198 	mp->m_flags |= XFS_MOUNT_BARRIER;
199 	mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
200 	mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
201 	mp->m_flags |= XFS_MOUNT_DELAYLOG;
202 
203 	/*
204 	 * These can be overridden by the mount option parsing.
205 	 */
206 	mp->m_logbufs = -1;
207 	mp->m_logbsize = -1;
208 
209 	if (!options)
210 		goto done;
211 
212 	while ((this_char = strsep(&options, ",")) != NULL) {
213 		if (!*this_char)
214 			continue;
215 		if ((value = strchr(this_char, '=')) != NULL)
216 			*value++ = 0;
217 
218 		if (!strcmp(this_char, MNTOPT_LOGBUFS)) {
219 			if (!value || !*value) {
220 				xfs_warn(mp, "%s option requires an argument",
221 					this_char);
222 				return EINVAL;
223 			}
224 			mp->m_logbufs = simple_strtoul(value, &eov, 10);
225 		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
226 			if (!value || !*value) {
227 				xfs_warn(mp, "%s option requires an argument",
228 					this_char);
229 				return EINVAL;
230 			}
231 			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
232 		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
233 			if (!value || !*value) {
234 				xfs_warn(mp, "%s option requires an argument",
235 					this_char);
236 				return EINVAL;
237 			}
238 			mp->m_logname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
239 			if (!mp->m_logname)
240 				return ENOMEM;
241 		} else if (!strcmp(this_char, MNTOPT_MTPT)) {
242 			xfs_warn(mp, "%s option not allowed on this system",
243 				this_char);
244 			return EINVAL;
245 		} else if (!strcmp(this_char, MNTOPT_RTDEV)) {
246 			if (!value || !*value) {
247 				xfs_warn(mp, "%s option requires an argument",
248 					this_char);
249 				return EINVAL;
250 			}
251 			mp->m_rtname = kstrndup(value, MAXNAMELEN, GFP_KERNEL);
252 			if (!mp->m_rtname)
253 				return ENOMEM;
254 		} else if (!strcmp(this_char, MNTOPT_BIOSIZE)) {
255 			if (!value || !*value) {
256 				xfs_warn(mp, "%s option requires an argument",
257 					this_char);
258 				return EINVAL;
259 			}
260 			iosize = simple_strtoul(value, &eov, 10);
261 			iosizelog = ffs(iosize) - 1;
262 		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
263 			if (!value || !*value) {
264 				xfs_warn(mp, "%s option requires an argument",
265 					this_char);
266 				return EINVAL;
267 			}
268 			iosize = suffix_strtoul(value, &eov, 10);
269 			iosizelog = ffs(iosize) - 1;
270 		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
271 			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {
272 			mp->m_flags |= XFS_MOUNT_GRPID;
273 		} else if (!strcmp(this_char, MNTOPT_NOGRPID) ||
274 			   !strcmp(this_char, MNTOPT_SYSVGROUPS)) {
275 			mp->m_flags &= ~XFS_MOUNT_GRPID;
276 		} else if (!strcmp(this_char, MNTOPT_WSYNC)) {
277 			mp->m_flags |= XFS_MOUNT_WSYNC;
278 		} else if (!strcmp(this_char, MNTOPT_NORECOVERY)) {
279 			mp->m_flags |= XFS_MOUNT_NORECOVERY;
280 		} else if (!strcmp(this_char, MNTOPT_NOALIGN)) {
281 			mp->m_flags |= XFS_MOUNT_NOALIGN;
282 		} else if (!strcmp(this_char, MNTOPT_SWALLOC)) {
283 			mp->m_flags |= XFS_MOUNT_SWALLOC;
284 		} else if (!strcmp(this_char, MNTOPT_SUNIT)) {
285 			if (!value || !*value) {
286 				xfs_warn(mp, "%s option requires an argument",
287 					this_char);
288 				return EINVAL;
289 			}
290 			dsunit = simple_strtoul(value, &eov, 10);
291 		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
292 			if (!value || !*value) {
293 				xfs_warn(mp, "%s option requires an argument",
294 					this_char);
295 				return EINVAL;
296 			}
297 			dswidth = simple_strtoul(value, &eov, 10);
298 		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
299 			mp->m_flags &= ~XFS_MOUNT_SMALL_INUMS;
300 #if !XFS_BIG_INUMS
301 			xfs_warn(mp, "%s option not allowed on this system",
302 				this_char);
303 			return EINVAL;
304 #endif
305 		} else if (!strcmp(this_char, MNTOPT_NOUUID)) {
306 			mp->m_flags |= XFS_MOUNT_NOUUID;
307 		} else if (!strcmp(this_char, MNTOPT_BARRIER)) {
308 			mp->m_flags |= XFS_MOUNT_BARRIER;
309 		} else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
310 			mp->m_flags &= ~XFS_MOUNT_BARRIER;
311 		} else if (!strcmp(this_char, MNTOPT_IKEEP)) {
312 			mp->m_flags |= XFS_MOUNT_IKEEP;
313 		} else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
314 			mp->m_flags &= ~XFS_MOUNT_IKEEP;
315 		} else if (!strcmp(this_char, MNTOPT_LARGEIO)) {
316 			mp->m_flags &= ~XFS_MOUNT_COMPAT_IOSIZE;
317 		} else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
318 			mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
319 		} else if (!strcmp(this_char, MNTOPT_ATTR2)) {
320 			mp->m_flags |= XFS_MOUNT_ATTR2;
321 		} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
322 			mp->m_flags &= ~XFS_MOUNT_ATTR2;
323 			mp->m_flags |= XFS_MOUNT_NOATTR2;
324 		} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
325 			mp->m_flags |= XFS_MOUNT_FILESTREAMS;
326 		} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
327 			mp->m_qflags &= ~(XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
328 					  XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
329 					  XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
330 					  XFS_UQUOTA_ENFD | XFS_OQUOTA_ENFD);
331 		} else if (!strcmp(this_char, MNTOPT_QUOTA) ||
332 			   !strcmp(this_char, MNTOPT_UQUOTA) ||
333 			   !strcmp(this_char, MNTOPT_USRQUOTA)) {
334 			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE |
335 					 XFS_UQUOTA_ENFD);
336 		} else if (!strcmp(this_char, MNTOPT_QUOTANOENF) ||
337 			   !strcmp(this_char, MNTOPT_UQUOTANOENF)) {
338 			mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
339 			mp->m_qflags &= ~XFS_UQUOTA_ENFD;
340 		} else if (!strcmp(this_char, MNTOPT_PQUOTA) ||
341 			   !strcmp(this_char, MNTOPT_PRJQUOTA)) {
342 			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE |
343 					 XFS_OQUOTA_ENFD);
344 		} else if (!strcmp(this_char, MNTOPT_PQUOTANOENF)) {
345 			mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
346 			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
347 		} else if (!strcmp(this_char, MNTOPT_GQUOTA) ||
348 			   !strcmp(this_char, MNTOPT_GRPQUOTA)) {
349 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE |
350 					 XFS_OQUOTA_ENFD);
351 		} else if (!strcmp(this_char, MNTOPT_GQUOTANOENF)) {
352 			mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
353 			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
354 		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
355 			mp->m_flags |= XFS_MOUNT_DELAYLOG;
356 		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
357 			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
358 		} else if (!strcmp(this_char, "ihashsize")) {
359 			xfs_warn(mp,
360 	"ihashsize no longer used, option is deprecated.");
361 		} else if (!strcmp(this_char, "osyncisdsync")) {
362 			xfs_warn(mp,
363 	"osyncisdsync has no effect, option is deprecated.");
364 		} else if (!strcmp(this_char, "osyncisosync")) {
365 			xfs_warn(mp,
366 	"osyncisosync has no effect, option is deprecated.");
367 		} else if (!strcmp(this_char, "irixsgid")) {
368 			xfs_warn(mp,
369 	"irixsgid is now a sysctl(2) variable, option is deprecated.");
370 		} else {
371 			xfs_warn(mp, "unknown mount option [%s].", this_char);
372 			return EINVAL;
373 		}
374 	}
375 
376 	/*
377 	 * no recovery flag requires a read-only mount
378 	 */
379 	if ((mp->m_flags & XFS_MOUNT_NORECOVERY) &&
380 	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
381 		xfs_warn(mp, "no-recovery mounts must be read-only.");
382 		return EINVAL;
383 	}
384 
385 	if ((mp->m_flags & XFS_MOUNT_NOALIGN) && (dsunit || dswidth)) {
386 		xfs_warn(mp,
387 	"sunit and swidth options incompatible with the noalign option");
388 		return EINVAL;
389 	}
390 
391 #ifndef CONFIG_XFS_QUOTA
392 	if (XFS_IS_QUOTA_RUNNING(mp)) {
393 		xfs_warn(mp, "quota support not available in this kernel.");
394 		return EINVAL;
395 	}
396 #endif
397 
398 	if ((mp->m_qflags & (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE)) &&
399 	    (mp->m_qflags & (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE))) {
400 		xfs_warn(mp, "cannot mount with both project and group quota");
401 		return EINVAL;
402 	}
403 
404 	if ((dsunit && !dswidth) || (!dsunit && dswidth)) {
405 		xfs_warn(mp, "sunit and swidth must be specified together");
406 		return EINVAL;
407 	}
408 
409 	if (dsunit && (dswidth % dsunit != 0)) {
410 		xfs_warn(mp,
411 	"stripe width (%d) must be a multiple of the stripe unit (%d)",
412 			dswidth, dsunit);
413 		return EINVAL;
414 	}
415 
416 done:
417 	if (!(mp->m_flags & XFS_MOUNT_NOALIGN)) {
418 		/*
419 		 * At this point the superblock has not been read
420 		 * in, therefore we do not know the block size.
421 		 * Before the mount call ends we will convert
422 		 * these to FSBs.
423 		 */
424 		if (dsunit) {
425 			mp->m_dalign = dsunit;
426 			mp->m_flags |= XFS_MOUNT_RETERR;
427 		}
428 
429 		if (dswidth)
430 			mp->m_swidth = dswidth;
431 	}
432 
433 	if (mp->m_logbufs != -1 &&
434 	    mp->m_logbufs != 0 &&
435 	    (mp->m_logbufs < XLOG_MIN_ICLOGS ||
436 	     mp->m_logbufs > XLOG_MAX_ICLOGS)) {
437 		xfs_warn(mp, "invalid logbufs value: %d [not %d-%d]",
438 			mp->m_logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
439 		return XFS_ERROR(EINVAL);
440 	}
441 	if (mp->m_logbsize != -1 &&
442 	    mp->m_logbsize !=  0 &&
443 	    (mp->m_logbsize < XLOG_MIN_RECORD_BSIZE ||
444 	     mp->m_logbsize > XLOG_MAX_RECORD_BSIZE ||
445 	     !is_power_of_2(mp->m_logbsize))) {
446 		xfs_warn(mp,
447 			"invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
448 			mp->m_logbsize);
449 		return XFS_ERROR(EINVAL);
450 	}
451 
452 	if (iosizelog) {
453 		if (iosizelog > XFS_MAX_IO_LOG ||
454 		    iosizelog < XFS_MIN_IO_LOG) {
455 			xfs_warn(mp, "invalid log iosize: %d [not %d-%d]",
456 				iosizelog, XFS_MIN_IO_LOG,
457 				XFS_MAX_IO_LOG);
458 			return XFS_ERROR(EINVAL);
459 		}
460 
461 		mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
462 		mp->m_readio_log = iosizelog;
463 		mp->m_writeio_log = iosizelog;
464 	}
465 
466 	return 0;
467 }
468 
469 struct proc_xfs_info {
470 	int	flag;
471 	char	*str;
472 };
473 
474 STATIC int
475 xfs_showargs(
476 	struct xfs_mount	*mp,
477 	struct seq_file		*m)
478 {
479 	static struct proc_xfs_info xfs_info_set[] = {
480 		/* the few simple ones we can get from the mount struct */
481 		{ XFS_MOUNT_IKEEP,		"," MNTOPT_IKEEP },
482 		{ XFS_MOUNT_WSYNC,		"," MNTOPT_WSYNC },
483 		{ XFS_MOUNT_NOALIGN,		"," MNTOPT_NOALIGN },
484 		{ XFS_MOUNT_SWALLOC,		"," MNTOPT_SWALLOC },
485 		{ XFS_MOUNT_NOUUID,		"," MNTOPT_NOUUID },
486 		{ XFS_MOUNT_NORECOVERY,		"," MNTOPT_NORECOVERY },
487 		{ XFS_MOUNT_ATTR2,		"," MNTOPT_ATTR2 },
488 		{ XFS_MOUNT_FILESTREAMS,	"," MNTOPT_FILESTREAM },
489 		{ XFS_MOUNT_GRPID,		"," MNTOPT_GRPID },
490 		{ XFS_MOUNT_DELAYLOG,		"," MNTOPT_DELAYLOG },
491 		{ 0, NULL }
492 	};
493 	static struct proc_xfs_info xfs_info_unset[] = {
494 		/* the few simple ones we can get from the mount struct */
495 		{ XFS_MOUNT_COMPAT_IOSIZE,	"," MNTOPT_LARGEIO },
496 		{ XFS_MOUNT_BARRIER,		"," MNTOPT_NOBARRIER },
497 		{ XFS_MOUNT_SMALL_INUMS,	"," MNTOPT_64BITINODE },
498 		{ 0, NULL }
499 	};
500 	struct proc_xfs_info	*xfs_infop;
501 
502 	for (xfs_infop = xfs_info_set; xfs_infop->flag; xfs_infop++) {
503 		if (mp->m_flags & xfs_infop->flag)
504 			seq_puts(m, xfs_infop->str);
505 	}
506 	for (xfs_infop = xfs_info_unset; xfs_infop->flag; xfs_infop++) {
507 		if (!(mp->m_flags & xfs_infop->flag))
508 			seq_puts(m, xfs_infop->str);
509 	}
510 
511 	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
512 		seq_printf(m, "," MNTOPT_ALLOCSIZE "=%dk",
513 				(int)(1 << mp->m_writeio_log) >> 10);
514 
515 	if (mp->m_logbufs > 0)
516 		seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
517 	if (mp->m_logbsize > 0)
518 		seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
519 
520 	if (mp->m_logname)
521 		seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
522 	if (mp->m_rtname)
523 		seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
524 
525 	if (mp->m_dalign > 0)
526 		seq_printf(m, "," MNTOPT_SUNIT "=%d",
527 				(int)XFS_FSB_TO_BB(mp, mp->m_dalign));
528 	if (mp->m_swidth > 0)
529 		seq_printf(m, "," MNTOPT_SWIDTH "=%d",
530 				(int)XFS_FSB_TO_BB(mp, mp->m_swidth));
531 
532 	if (mp->m_qflags & (XFS_UQUOTA_ACCT|XFS_UQUOTA_ENFD))
533 		seq_puts(m, "," MNTOPT_USRQUOTA);
534 	else if (mp->m_qflags & XFS_UQUOTA_ACCT)
535 		seq_puts(m, "," MNTOPT_UQUOTANOENF);
536 
537 	/* Either project or group quotas can be active, not both */
538 
539 	if (mp->m_qflags & XFS_PQUOTA_ACCT) {
540 		if (mp->m_qflags & XFS_OQUOTA_ENFD)
541 			seq_puts(m, "," MNTOPT_PRJQUOTA);
542 		else
543 			seq_puts(m, "," MNTOPT_PQUOTANOENF);
544 	} else if (mp->m_qflags & XFS_GQUOTA_ACCT) {
545 		if (mp->m_qflags & XFS_OQUOTA_ENFD)
546 			seq_puts(m, "," MNTOPT_GRPQUOTA);
547 		else
548 			seq_puts(m, "," MNTOPT_GQUOTANOENF);
549 	}
550 
551 	if (!(mp->m_qflags & XFS_ALL_QUOTA_ACCT))
552 		seq_puts(m, "," MNTOPT_NOQUOTA);
553 
554 	return 0;
555 }
556 __uint64_t
557 xfs_max_file_offset(
558 	unsigned int		blockshift)
559 {
560 	unsigned int		pagefactor = 1;
561 	unsigned int		bitshift = BITS_PER_LONG - 1;
562 
563 	/* Figure out maximum filesize, on Linux this can depend on
564 	 * the filesystem blocksize (on 32 bit platforms).
565 	 * __block_write_begin does this in an [unsigned] long...
566 	 *      page->index << (PAGE_CACHE_SHIFT - bbits)
567 	 * So, for page sized blocks (4K on 32 bit platforms),
568 	 * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is
569 	 *      (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1)
570 	 * but for smaller blocksizes it is less (bbits = log2 bsize).
571 	 * Note1: get_block_t takes a long (implicit cast from above)
572 	 * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch
573 	 * can optionally convert the [unsigned] long from above into
574 	 * an [unsigned] long long.
575 	 */
576 
577 #if BITS_PER_LONG == 32
578 # if defined(CONFIG_LBDAF)
579 	ASSERT(sizeof(sector_t) == 8);
580 	pagefactor = PAGE_CACHE_SIZE;
581 	bitshift = BITS_PER_LONG;
582 # else
583 	pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift);
584 # endif
585 #endif
586 
587 	return (((__uint64_t)pagefactor) << bitshift) - 1;
588 }
589 
590 STATIC int
591 xfs_blkdev_get(
592 	xfs_mount_t		*mp,
593 	const char		*name,
594 	struct block_device	**bdevp)
595 {
596 	int			error = 0;
597 
598 	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
599 				    mp);
600 	if (IS_ERR(*bdevp)) {
601 		error = PTR_ERR(*bdevp);
602 		xfs_warn(mp, "Invalid device [%s], error=%d\n", name, error);
603 	}
604 
605 	return -error;
606 }
607 
608 STATIC void
609 xfs_blkdev_put(
610 	struct block_device	*bdev)
611 {
612 	if (bdev)
613 		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
614 }
615 
616 /*
617  * Try to write out the superblock using barriers.
618  */
619 STATIC int
620 xfs_barrier_test(
621 	xfs_mount_t	*mp)
622 {
623 	xfs_buf_t	*sbp = xfs_getsb(mp, 0);
624 	int		error;
625 
626 	XFS_BUF_UNDONE(sbp);
627 	XFS_BUF_UNREAD(sbp);
628 	XFS_BUF_UNDELAYWRITE(sbp);
629 	XFS_BUF_WRITE(sbp);
630 	XFS_BUF_UNASYNC(sbp);
631 	XFS_BUF_ORDERED(sbp);
632 
633 	xfsbdstrat(mp, sbp);
634 	error = xfs_buf_iowait(sbp);
635 
636 	/*
637 	 * Clear all the flags we set and possible error state in the
638 	 * buffer.  We only did the write to try out whether barriers
639 	 * worked and shouldn't leave any traces in the superblock
640 	 * buffer.
641 	 */
642 	XFS_BUF_DONE(sbp);
643 	XFS_BUF_ERROR(sbp, 0);
644 	XFS_BUF_UNORDERED(sbp);
645 
646 	xfs_buf_relse(sbp);
647 	return error;
648 }
649 
650 STATIC void
651 xfs_mountfs_check_barriers(xfs_mount_t *mp)
652 {
653 	int error;
654 
655 	if (mp->m_logdev_targp != mp->m_ddev_targp) {
656 		xfs_notice(mp,
657 		  "Disabling barriers, not supported with external log device");
658 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
659 		return;
660 	}
661 
662 	if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
663 		xfs_notice(mp,
664 			"Disabling barriers, underlying device is readonly");
665 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
666 		return;
667 	}
668 
669 	error = xfs_barrier_test(mp);
670 	if (error) {
671 		xfs_notice(mp,
672 			"Disabling barriers, trial barrier write failed");
673 		mp->m_flags &= ~XFS_MOUNT_BARRIER;
674 		return;
675 	}
676 }
677 
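/*
 * Issue a cache flush to the block device underlying the given buffer
 * target.
 */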
678 void
679 xfs_blkdev_issue_flush(
680 	xfs_buftarg_t		*buftarg)
681 {
682 	blkdev_issue_flush(buftarg->bt_bdev, GFP_KERNEL, NULL);
683 }
684 
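/*
 * Tear down the buffer targets and release the log and realtime block
 * devices opened by xfs_open_devices().  The data device itself is
 * owned and released by the generic superblock code.
 */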
685 STATIC void
686 xfs_close_devices(
687 	struct xfs_mount	*mp)
688 {
689 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
690 		struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
691 		xfs_free_buftarg(mp, mp->m_logdev_targp);
692 		xfs_blkdev_put(logdev);
693 	}
694 	if (mp->m_rtdev_targp) {
695 		struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
696 		xfs_free_buftarg(mp, mp->m_rtdev_targp);
697 		xfs_blkdev_put(rtdev);
698 	}
699 	xfs_free_buftarg(mp, mp->m_ddev_targp);
700 }
701 
702 /*
703  * The file system configurations are:
704  *	(1) device (partition) with data and internal log
705  *	(2) logical volume with data and log subvolumes.
706  *	(3) logical volume with data, log, and realtime subvolumes.
707  *
708  * We only have to handle opening the log and realtime volumes here if
709  * they are present.  The data subvolume has already been opened by
710  * get_sb_bdev() and is stored in sb->s_bdev.
711  */
712 STATIC int
713 xfs_open_devices(
714 	struct xfs_mount	*mp)
715 {
716 	struct block_device	*ddev = mp->m_super->s_bdev;
717 	struct block_device	*logdev = NULL, *rtdev = NULL;
718 	int			error;
719 
720 	/*
721 	 * Open real time and log devices - order is important.
722 	 */
723 	if (mp->m_logname) {
724 		error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
725 		if (error)
726 			goto out;
727 	}
728 
729 	if (mp->m_rtname) {
730 		error = xfs_blkdev_get(mp, mp->m_rtname, &rtdev);
731 		if (error)
732 			goto out_close_logdev;
733 
734 		if (rtdev == ddev || rtdev == logdev) {
735 			xfs_warn(mp,
736 	"Cannot mount filesystem with identical rtdev and ddev/logdev.");
737 			error = EINVAL;
738 			goto out_close_rtdev;
739 		}
740 	}
741 
742 	/*
743 	 * Setup xfs_mount buffer target pointers
744 	 */
745 	error = ENOMEM;
746 	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, 0, mp->m_fsname);
747 	if (!mp->m_ddev_targp)
748 		goto out_close_rtdev;
749 
750 	if (rtdev) {
751 		mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, 1,
752 							mp->m_fsname);
753 		if (!mp->m_rtdev_targp)
754 			goto out_free_ddev_targ;
755 	}
756 
757 	if (logdev && logdev != ddev) {
758 		mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, 1,
759 							mp->m_fsname);
760 		if (!mp->m_logdev_targp)
761 			goto out_free_rtdev_targ;
762 	} else {
763 		mp->m_logdev_targp = mp->m_ddev_targp;
764 	}
765 
766 	return 0;
767 
768  out_free_rtdev_targ:
769 	if (mp->m_rtdev_targp)
770 		xfs_free_buftarg(mp, mp->m_rtdev_targp);
771  out_free_ddev_targ:
772 	xfs_free_buftarg(mp, mp->m_ddev_targp);
773  out_close_rtdev:
774 	if (rtdev)
775 		xfs_blkdev_put(rtdev);
776  out_close_logdev:
777 	if (logdev && logdev != ddev)
778 		xfs_blkdev_put(logdev);
779  out:
780 	return error;
781 }
782 
783 /*
784  * Setup xfs_mount buffer target pointers based on superblock
785  */
786 STATIC int
787 xfs_setup_devices(
788 	struct xfs_mount	*mp)
789 {
790 	int			error;
791 
792 	error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
793 				    mp->m_sb.sb_sectsize);
794 	if (error)
795 		return error;
796 
797 	if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
798 		unsigned int	log_sector_size = BBSIZE;
799 
800 		if (xfs_sb_version_hassector(&mp->m_sb))
801 			log_sector_size = mp->m_sb.sb_logsectsize;
802 		error = xfs_setsize_buftarg(mp->m_logdev_targp,
803 					    mp->m_sb.sb_blocksize,
804 					    log_sector_size);
805 		if (error)
806 			return error;
807 	}
808 	if (mp->m_rtdev_targp) {
809 		error = xfs_setsize_buftarg(mp->m_rtdev_targp,
810 					    mp->m_sb.sb_blocksize,
811 					    mp->m_sb.sb_sectsize);
812 		if (error)
813 			return error;
814 	}
815 
816 	return 0;
817 }
818 
819 /* Catch misguided souls that try to use this interface on XFS */
820 STATIC struct inode *
821 xfs_fs_alloc_inode(
822 	struct super_block	*sb)
823 {
824 	BUG();
825 	return NULL;
826 }
827 
828 /*
829  * Now that the generic code is guaranteed not to be accessing
830  * the linux inode, we can reclaim the inode.
831  */
832 STATIC void
833 xfs_fs_destroy_inode(
834 	struct inode		*inode)
835 {
836 	struct xfs_inode	*ip = XFS_I(inode);
837 
838 	trace_xfs_destroy_inode(ip);
839 
840 	XFS_STATS_INC(vn_reclaim);
841 
842 	/* bad inode, get out here ASAP */
843 	if (is_bad_inode(inode))
844 		goto out_reclaim;
845 
846 	xfs_ioend_wait(ip);
847 
848 	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
849 
850 	/*
851 	 * We should never get here with one of the reclaim flags already set.
852 	 */
853 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
854 	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
855 
856 	/*
857 	 * We always use background reclaim here because even if the
858 	 * inode is clean, it still may be under IO and hence we have
859 	 * to take the flush lock. The background reclaim path handles
860 	 * this more efficiently than we can here, so simply let background
861 	 * reclaim tear down all inodes.
862 	 */
863 out_reclaim:
864 	xfs_inode_set_reclaim_tag(ip);
865 }
866 
867 /*
868  * Slab object creation initialisation for the XFS inode.
869  * This covers only the idempotent fields in the XFS inode;
870  * all other fields need to be initialised on allocation
871  * from the slab. This avoids the need to repeatedly initialise
872  * fields in the XFS inode that are left in the initialised state
873  * when freeing the inode.
874  */
875 STATIC void
876 xfs_fs_inode_init_once(
877 	void			*inode)
878 {
879 	struct xfs_inode	*ip = inode;
880 
881 	memset(ip, 0, sizeof(struct xfs_inode));
882 
883 	/* vfs inode */
884 	inode_init_once(VFS_I(ip));
885 
886 	/* xfs inode */
887 	atomic_set(&ip->i_iocount, 0);
888 	atomic_set(&ip->i_pincount, 0);
889 	spin_lock_init(&ip->i_flags_lock);
890 	init_waitqueue_head(&ip->i_ipin_wait);
891 	/*
892 	 * Because we want to use a counting completion, complete
893 	 * the flush completion once to allow a single access to
894 	 * the flush completion without blocking.
895 	 */
896 	init_completion(&ip->i_flush);
897 	complete(&ip->i_flush);
898 
899 	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
900 		     "xfsino", ip->i_ino);
901 }
902 
903 /*
904  * Dirty the XFS inode when mark_inode_dirty_sync() is called so that
905  * we catch unlogged VFS level updates to the inode.
906  *
907  * We need the barrier() to maintain correct ordering between unlogged
908  * updates and the transaction commit code that clears the i_update_core
909  * field. This requires all updates to be completed before marking the
910  * inode dirty.
911  */
912 STATIC void
913 xfs_fs_dirty_inode(
914 	struct inode	*inode)
915 {
916 	barrier();
917 	XFS_I(inode)->i_update_core = 1;
918 }
919 
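/*
 * Log the inode core so that unlogged VFS-level updates make it into
 * the log.  Called with the ILOCK held shared; the lock is dropped
 * across the transaction reservation, retaken exclusively to join the
 * transaction, and demoted back to shared before returning.
 */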
920 STATIC int
921 xfs_log_inode(
922 	struct xfs_inode	*ip)
923 {
924 	struct xfs_mount	*mp = ip->i_mount;
925 	struct xfs_trans	*tp;
926 	int			error;
927 
928 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
929 	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
930 	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
931 
932 	if (error) {
933 		xfs_trans_cancel(tp, 0);
934 		/* we need to return with the lock held shared */
935 		xfs_ilock(ip, XFS_ILOCK_SHARED);
936 		return error;
937 	}
938 
939 	xfs_ilock(ip, XFS_ILOCK_EXCL);
940 
941 	/*
942 	 * Note - it's possible that we might have pushed ourselves out of the
943 	 * way during trans_reserve which would flush the inode.  But there's
944 	 * no guarantee that the inode buffer has actually gone out yet (it's
945 	 * delwri).  Plus the buffer could be pinned anyway if it's part of
946 	 * an inode in another recent transaction.  So we play it safe and
947 	 * fire off the transaction anyway.
948 	 */
949 	xfs_trans_ijoin(tp, ip);
950 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
951 	error = xfs_trans_commit(tp, 0);
952 	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
953 
954 	return error;
955 }
956 
957 STATIC int
958 xfs_fs_write_inode(
959 	struct inode		*inode,
960 	struct writeback_control *wbc)
961 {
962 	struct xfs_inode	*ip = XFS_I(inode);
963 	struct xfs_mount	*mp = ip->i_mount;
964 	int			error = EAGAIN;
965 
966 	trace_xfs_write_inode(ip);
967 
968 	if (XFS_FORCED_SHUTDOWN(mp))
969 		return XFS_ERROR(EIO);
970 
971 	if (wbc->sync_mode == WB_SYNC_ALL) {
972 		/*
973 		 * Make sure the inode has made it into the log.  Instead
974 		 * of forcing it all the way to stable storage using a
975 		 * synchronous transaction we let the log force inside the
976 		 * ->sync_fs call do that for us, which reduces the number
977 		 * of synchronous log forces dramatically.
978 		 */
979 		xfs_ioend_wait(ip);
980 		xfs_ilock(ip, XFS_ILOCK_SHARED);
981 		if (ip->i_update_core) {
982 			error = xfs_log_inode(ip);
983 			if (error)
984 				goto out_unlock;
985 		}
986 	} else {
987 		/*
988 		 * We make this non-blocking if the inode is contended, return
989 		 * EAGAIN to indicate to the caller that they did not succeed.
990 		 * This prevents the flush path from blocking on inodes inside
991 		 * another operation right now, they get caught later by
992 		 * xfs_sync.
993 		 */
994 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
995 			goto out;
996 
997 		if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
998 			goto out_unlock;
999 
1000 		/*
1001 		 * Now we have the flush lock and the inode is not pinned, we
1002 		 * can check if the inode is really clean as we know that
1003 		 * there are no pending transaction completions, it is not
1004 		 * waiting on the delayed write queue and there is no IO in
1005 		 * progress.
1006 		 */
1007 		if (xfs_inode_clean(ip)) {
1008 			xfs_ifunlock(ip);
1009 			error = 0;
1010 			goto out_unlock;
1011 		}
1012 		error = xfs_iflush(ip, SYNC_TRYLOCK);
1013 	}
1014 
1015  out_unlock:
1016 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
1017  out:
1018 	/*
1019 	 * if we failed to write out the inode then mark
1020 	 * it dirty again so we'll try again later.
1021 	 */
1022 	if (error)
1023 		xfs_mark_inode_dirty_sync(ip);
1024 	return -error;
1025 }
1026 
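/*
 * ->evict_inode is called when the VFS is done with the inode: throw
 * away the pagecache, update the stats and run XFS inactivation to do
 * any final on-disk processing.
 */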
1027 STATIC void
1028 xfs_fs_evict_inode(
1029 	struct inode		*inode)
1030 {
1031 	xfs_inode_t		*ip = XFS_I(inode);
1032 
1033 	trace_xfs_evict_inode(ip);
1034 
1035 	truncate_inode_pages(&inode->i_data, 0);
1036 	end_writeback(inode);
1037 	XFS_STATS_INC(vn_rele);
1038 	XFS_STATS_INC(vn_remove);
1039 	XFS_STATS_DEC(vn_active);
1040 
1041 	/*
1042 	 * The iolock is used by the file system to coordinate reads,
1043 	 * writes, and block truncates.  Up to this point the lock
1044 	 * protected concurrent accesses by users of the inode.  But
1045 	 * from here forward we're doing some final processing of the
1046 	 * inode because we're done with it, and although we reuse the
1047 	 * iolock for protection it is really a distinct lock class
1048 	 * (in the lockdep sense) from before.  To keep lockdep happy
1049 	 * (and basically indicate what we are doing), we explicitly
1050 	 * re-init the iolock here.
1051 	 */
1052 	ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
1053 	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
1054 	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
1055 			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
1056 
1057 	xfs_inactive(ip);
1058 }
1059 
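/* Free the fs, realtime and log device names set up by xfs_parseargs(). */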
1060 STATIC void
1061 xfs_free_fsname(
1062 	struct xfs_mount	*mp)
1063 {
1064 	kfree(mp->m_fsname);
1065 	kfree(mp->m_rtname);
1066 	kfree(mp->m_logname);
1067 }
1068 
1069 STATIC void
1070 xfs_fs_put_super(
1071 	struct super_block	*sb)
1072 {
1073 	struct xfs_mount	*mp = XFS_M(sb);
1074 
1075 	/*
1076 	 * Unregister the memory shrinker before we tear down the mount
1077 	 * structure so we don't have memory reclaim racing with us here.
1078 	 */
1079 	xfs_inode_shrinker_unregister(mp);
1080 	xfs_syncd_stop(mp);
1081 
1082 	/*
1083 	 * Blow away any referenced inode in the filestreams cache.
1084 	 * This can and will cause log traffic as inodes go inactive
1085 	 * here.
1086 	 */
1087 	xfs_filestream_unmount(mp);
1088 
1089 	XFS_bflush(mp->m_ddev_targp);
1090 
1091 	xfs_unmountfs(mp);
1092 	xfs_freesb(mp);
1093 	xfs_icsb_destroy_counters(mp);
1094 	xfs_close_devices(mp);
1095 	xfs_free_fsname(mp);
1096 	kfree(mp);
1097 }
1098 
1099 STATIC int
1100 xfs_fs_sync_fs(
1101 	struct super_block	*sb,
1102 	int			wait)
1103 {
1104 	struct xfs_mount	*mp = XFS_M(sb);
1105 	int			error;
1106 
1107 	/*
1108 	 * Not much we can do for the first async pass.  Writing out the
1109 	 * superblock would be counter-productive as we are going to redirty it
1110 	 * when writing out other data and metadata (and writing out a single
1111 	 * block is quite fast anyway).
1112 	 *
1113 	 * Try to asynchronously kick off quota syncing at least.
1114 	 */
1115 	if (!wait) {
1116 		xfs_qm_sync(mp, SYNC_TRYLOCK);
1117 		return 0;
1118 	}
1119 
1120 	error = xfs_quiesce_data(mp);
1121 	if (error)
1122 		return -error;
1123 
1124 	if (laptop_mode) {
1125 		/*
1126 		 * The disk must be active because we're syncing.
1127 		 * We schedule xfssyncd now (now that the disk is
1128 		 * active) instead of later (when it might not be).
1129 		 */
1130 		flush_delayed_work_sync(&mp->m_sync_work);
1131 	}
1132 
1133 	return 0;
1134 }
1135 
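/*
 * Report filesystem geometry and free space/inode counts for statfs(2),
 * adjusted for project quota limits where applicable.
 */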
1136 STATIC int
1137 xfs_fs_statfs(
1138 	struct dentry		*dentry,
1139 	struct kstatfs		*statp)
1140 {
1141 	struct xfs_mount	*mp = XFS_M(dentry->d_sb);
1142 	xfs_sb_t		*sbp = &mp->m_sb;
1143 	struct xfs_inode	*ip = XFS_I(dentry->d_inode);
1144 	__uint64_t		fakeinos, id;
1145 	xfs_extlen_t		lsize;
1146 	__int64_t		ffree;
1147 
1148 	statp->f_type = XFS_SB_MAGIC;
1149 	statp->f_namelen = MAXNAMELEN - 1;
1150 
1151 	id = huge_encode_dev(mp->m_ddev_targp->bt_dev);
1152 	statp->f_fsid.val[0] = (u32)id;
1153 	statp->f_fsid.val[1] = (u32)(id >> 32);
1154 
1155 	xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
1156 
1157 	spin_lock(&mp->m_sb_lock);
1158 	statp->f_bsize = sbp->sb_blocksize;
1159 	lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
1160 	statp->f_blocks = sbp->sb_dblocks - lsize;
1161 	statp->f_bfree = statp->f_bavail =
1162 				sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
1163 	fakeinos = statp->f_bfree << sbp->sb_inopblog;
1164 	statp->f_files =
1165 	    MIN(sbp->sb_icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
1166 	if (mp->m_maxicount)
1167 		statp->f_files = min_t(typeof(statp->f_files),
1168 					statp->f_files,
1169 					mp->m_maxicount);
1170 
1171 	/* make sure statp->f_ffree does not underflow */
1172 	ffree = statp->f_files - (sbp->sb_icount - sbp->sb_ifree);
1173 	statp->f_ffree = max_t(__int64_t, ffree, 0);
1174 
1175 	spin_unlock(&mp->m_sb_lock);
1176 
1177 	if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) ||
1178 	    ((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))) ==
1179 			      (XFS_PQUOTA_ACCT|XFS_OQUOTA_ENFD))
1180 		xfs_qm_statvfs(ip, statp);
1181 	return 0;
1182 }
1183 
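/*
 * Stash the current reserve pool size and release all reserved blocks
 * back to the free pool, e.g. before a freeze or an rw->ro remount.
 */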
1184 STATIC void
1185 xfs_save_resvblks(struct xfs_mount *mp)
1186 {
1187 	__uint64_t resblks = 0;
1188 
1189 	mp->m_resblks_save = mp->m_resblks;
1190 	xfs_reserve_blocks(mp, &resblks, NULL);
1191 }
1192 
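/*
 * Refill the reserve pool from the stashed value, or fall back to the
 * default reservation if nothing was saved.
 */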
1193 STATIC void
1194 xfs_restore_resvblks(struct xfs_mount *mp)
1195 {
1196 	__uint64_t resblks;
1197 
1198 	if (mp->m_resblks_save) {
1199 		resblks = mp->m_resblks_save;
1200 		mp->m_resblks_save = 0;
1201 	} else
1202 		resblks = xfs_default_resblks(mp);
1203 
1204 	xfs_reserve_blocks(mp, &resblks, NULL);
1205 }
1206 
1207 STATIC int
1208 xfs_fs_remount(
1209 	struct super_block	*sb,
1210 	int			*flags,
1211 	char			*options)
1212 {
1213 	struct xfs_mount	*mp = XFS_M(sb);
1214 	substring_t		args[MAX_OPT_ARGS];
1215 	char			*p;
1216 	int			error;
1217 
1218 	while ((p = strsep(&options, ",")) != NULL) {
1219 		int token;
1220 
1221 		if (!*p)
1222 			continue;
1223 
1224 		token = match_token(p, tokens, args);
1225 		switch (token) {
1226 		case Opt_barrier:
1227 			mp->m_flags |= XFS_MOUNT_BARRIER;
1228 
1229 			/*
1230 			 * Test if barriers are actually working if we can,
1231 			 * else delay this check until the filesystem is
1232 			 * marked writeable.
1233 			 */
1234 			if (!(mp->m_flags & XFS_MOUNT_RDONLY))
1235 				xfs_mountfs_check_barriers(mp);
1236 			break;
1237 		case Opt_nobarrier:
1238 			mp->m_flags &= ~XFS_MOUNT_BARRIER;
1239 			break;
1240 		default:
1241 			/*
1242 			 * Logically we would return an error here to prevent
1243 			 * users from believing they might have changed
1244 			 * mount options using remount which can't be changed.
1245 			 *
1246 			 * But unfortunately mount(8) adds all options from
1247 			 * mtab and fstab to the mount arguments in some cases
1248 			 * so we can't blindly reject options, but have to
1249 			 * check for each specified option if it actually
1250 			 * differs from the currently set option and only
1251 			 * reject it if that's the case.
1252 			 *
1253 			 * Until that is implemented we return success for
1254 			 * every remount request, and silently ignore all
1255 			 * options that we can't actually change.
1256 			 */
1257 #if 0
1258 			xfs_info(mp,
1259 		"mount option \"%s\" not supported for remount\n", p);
1260 			return -EINVAL;
1261 #else
1262 			break;
1263 #endif
1264 		}
1265 	}
1266 
1267 	/* ro -> rw */
1268 	if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
1269 		mp->m_flags &= ~XFS_MOUNT_RDONLY;
1270 		if (mp->m_flags & XFS_MOUNT_BARRIER)
1271 			xfs_mountfs_check_barriers(mp);
1272 
1273 		/*
1274 		 * If this is the first remount to writeable state we
1275 		 * might have some superblock changes to update.
1276 		 */
1277 		if (mp->m_update_flags) {
1278 			error = xfs_mount_log_sb(mp, mp->m_update_flags);
1279 			if (error) {
1280 				xfs_warn(mp, "failed to write sb changes");
1281 				return error;
1282 			}
1283 			mp->m_update_flags = 0;
1284 		}
1285 
1286 		/*
1287 		 * Fill out the reserve pool if it is empty. Use the stashed
1288 		 * value if it is non-zero, otherwise go with the default.
1289 		 */
1290 		xfs_restore_resvblks(mp);
1291 	}
1292 
1293 	/* rw -> ro */
1294 	if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
1295 		/*
1296 		 * After we have synced the data but before we sync the
1297 		 * metadata, we need to free up the reserve block pool so that
1298 		 * the used block count in the superblock on disk is correct at
1299 		 * the end of the remount. Stash the current reserve pool size
1300 		 * so that if we get remounted rw, we can return it to the same
1301 		 * size.
1302 		 */
1303 
1304 		xfs_quiesce_data(mp);
1305 		xfs_save_resvblks(mp);
1306 		xfs_quiesce_attr(mp);
1307 		mp->m_flags |= XFS_MOUNT_RDONLY;
1308 	}
1309 
1310 	return 0;
1311 }
1312 
1313 /*
1314  * Second stage of a freeze. The data is already frozen so we only
1315  * need to take care of the metadata. Once that's done write a dummy
1316  * record to dirty the log in case of a crash while frozen.
1317  */
1318 STATIC int
1319 xfs_fs_freeze(
1320 	struct super_block	*sb)
1321 {
1322 	struct xfs_mount	*mp = XFS_M(sb);
1323 
1324 	xfs_save_resvblks(mp);
1325 	xfs_quiesce_attr(mp);
1326 	return -xfs_fs_log_dummy(mp);
1327 }
1328 
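/*
 * Undo a freeze: put the reserve block pool back to its pre-freeze size.
 */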
1329 STATIC int
1330 xfs_fs_unfreeze(
1331 	struct super_block	*sb)
1332 {
1333 	struct xfs_mount	*mp = XFS_M(sb);
1334 
1335 	xfs_restore_resvblks(mp);
1336 	return 0;
1337 }
1338 
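/*
 * Show the XFS-specific mount options in /proc/mounts via xfs_showargs().
 */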
1339 STATIC int
1340 xfs_fs_show_options(
1341 	struct seq_file		*m,
1342 	struct vfsmount		*mnt)
1343 {
1344 	return -xfs_showargs(XFS_M(mnt->mnt_sb), m);
1345 }
1346 
1347 /*
1348  * This function fills in xfs_mount_t fields based on mount args.
1349  * Note: the superblock _has_ now been read in.
1350  */
1351 STATIC int
1352 xfs_finish_flags(
1353 	struct xfs_mount	*mp)
1354 {
1355 	int			ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
1356 
1357 	/* Fail a mount where the logbuf is smaller than the log stripe */
1358 	if (xfs_sb_version_haslogv2(&mp->m_sb)) {
1359 		if (mp->m_logbsize <= 0 &&
1360 		    mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE) {
1361 			mp->m_logbsize = mp->m_sb.sb_logsunit;
1362 		} else if (mp->m_logbsize > 0 &&
1363 			   mp->m_logbsize < mp->m_sb.sb_logsunit) {
1364 			xfs_warn(mp,
1365 		"logbuf size must be greater than or equal to log stripe size");
1366 			return XFS_ERROR(EINVAL);
1367 		}
1368 	} else {
1369 		/* Fail a mount if the logbuf is larger than 32K */
1370 		if (mp->m_logbsize > XLOG_BIG_RECORD_BSIZE) {
1371 			xfs_warn(mp,
1372 		"logbuf size for version 1 logs must be 16K or 32K");
1373 			return XFS_ERROR(EINVAL);
1374 		}
1375 	}
1376 
1377 	/*
1378 	 * mkfs'ed attr2 will turn on attr2 mount unless explicitly
1379 	 * told by noattr2 to turn it off
1380 	 */
1381 	if (xfs_sb_version_hasattr2(&mp->m_sb) &&
1382 	    !(mp->m_flags & XFS_MOUNT_NOATTR2))
1383 		mp->m_flags |= XFS_MOUNT_ATTR2;
1384 
1385 	/*
1386 	 * prohibit r/w mounts of read-only filesystems
1387 	 */
1388 	if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
1389 		xfs_warn(mp,
1390 			"cannot mount a read-only filesystem as read-write");
1391 		return XFS_ERROR(EROFS);
1392 	}
1393 
1394 	return 0;
1395 }
1396 
1397 STATIC int
1398 xfs_fs_fill_super(
1399 	struct super_block	*sb,
1400 	void			*data,
1401 	int			silent)
1402 {
1403 	struct inode		*root;
1404 	struct xfs_mount	*mp = NULL;
1405 	int			flags = 0, error = ENOMEM;
1406 
1407 	mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
1408 	if (!mp)
1409 		goto out;
1410 
1411 	spin_lock_init(&mp->m_sb_lock);
1412 	mutex_init(&mp->m_growlock);
1413 	atomic_set(&mp->m_active_trans, 0);
1414 
1415 	mp->m_super = sb;
1416 	sb->s_fs_info = mp;
1417 
1418 	error = xfs_parseargs(mp, (char *)data);
1419 	if (error)
1420 		goto out_free_fsname;
1421 
1422 	sb_min_blocksize(sb, BBSIZE);
1423 	sb->s_xattr = xfs_xattr_handlers;
1424 	sb->s_export_op = &xfs_export_operations;
1425 #ifdef CONFIG_XFS_QUOTA
1426 	sb->s_qcop = &xfs_quotactl_operations;
1427 #endif
1428 	sb->s_op = &xfs_super_operations;
1429 
1430 	if (silent)
1431 		flags |= XFS_MFSI_QUIET;
1432 
1433 	error = xfs_open_devices(mp);
1434 	if (error)
1435 		goto out_free_fsname;
1436 
1437 	error = xfs_icsb_init_counters(mp);
1438 	if (error)
1439 		goto out_close_devices;
1440 
1441 	error = xfs_readsb(mp, flags);
1442 	if (error)
1443 		goto out_destroy_counters;
1444 
1445 	error = xfs_finish_flags(mp);
1446 	if (error)
1447 		goto out_free_sb;
1448 
1449 	error = xfs_setup_devices(mp);
1450 	if (error)
1451 		goto out_free_sb;
1452 
1453 	if (mp->m_flags & XFS_MOUNT_BARRIER)
1454 		xfs_mountfs_check_barriers(mp);
1455 
1456 	error = xfs_filestream_mount(mp);
1457 	if (error)
1458 		goto out_free_sb;
1459 
1460 	/*
1461 	 * we must configure the block size in the superblock before we run the
1462 	 * full mount process as the mount process can lookup and cache inodes.
1463 	 * For the same reason we must also initialise the syncd and register
1464 	 * the inode cache shrinker so that inodes can be reclaimed during
1465 	 * operations like a quotacheck that iterate all inodes in the
1466 	 * filesystem.
1467 	 */
1468 	sb->s_magic = XFS_SB_MAGIC;
1469 	sb->s_blocksize = mp->m_sb.sb_blocksize;
1470 	sb->s_blocksize_bits = ffs(sb->s_blocksize) - 1;
1471 	sb->s_maxbytes = xfs_max_file_offset(sb->s_blocksize_bits);
1472 	sb->s_time_gran = 1;
1473 	set_posix_acl_flag(sb);
1474 
1475 	error = xfs_syncd_init(mp);
1476 	if (error)
1477 		goto out_filestream_unmount;
1478 
1479 	xfs_inode_shrinker_register(mp);
1480 
1481 	error = xfs_mountfs(mp);
1482 	if (error)
1483 		goto out_syncd_stop;
1484 
1485 	root = igrab(VFS_I(mp->m_rootip));
1486 	if (!root) {
1487 		error = ENOENT;
1488 		goto fail_unmount;
1489 	}
1490 	if (is_bad_inode(root)) {
1491 		error = EINVAL;
1492 		goto fail_vnrele;
1493 	}
1494 	sb->s_root = d_alloc_root(root);
1495 	if (!sb->s_root) {
1496 		error = ENOMEM;
1497 		goto fail_vnrele;
1498 	}
1499 
1500 	return 0;
1501 
1502  out_syncd_stop:
1503 	xfs_inode_shrinker_unregister(mp);
1504 	xfs_syncd_stop(mp);
1505  out_filestream_unmount:
1506 	xfs_filestream_unmount(mp);
1507  out_free_sb:
1508 	xfs_freesb(mp);
1509  out_destroy_counters:
1510 	xfs_icsb_destroy_counters(mp);
1511  out_close_devices:
1512 	xfs_close_devices(mp);
1513  out_free_fsname:
1514 	xfs_free_fsname(mp);
1515 	kfree(mp);
1516  out:
1517 	return -error;
1518 
1519  fail_vnrele:
1520 	if (sb->s_root) {
1521 		dput(sb->s_root);
1522 		sb->s_root = NULL;
1523 	} else {
1524 		iput(root);
1525 	}
1526 
1527  fail_unmount:
1528 	xfs_inode_shrinker_unregister(mp);
1529 	xfs_syncd_stop(mp);
1530 
1531 	/*
1532 	 * Blow away any referenced inode in the filestreams cache.
1533 	 * This can and will cause log traffic as inodes go inactive
1534 	 * here.
1535 	 */
1536 	xfs_filestream_unmount(mp);
1537 
1538 	XFS_bflush(mp->m_ddev_targp);
1539 
1540 	xfs_unmountfs(mp);
1541 	goto out_free_sb;
1542 }
1543 
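/*
 * Mount entry point: hand off to the generic block device mount helper,
 * with xfs_fs_fill_super() doing the filesystem-specific work.
 */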
1544 STATIC struct dentry *
1545 xfs_fs_mount(
1546 	struct file_system_type	*fs_type,
1547 	int			flags,
1548 	const char		*dev_name,
1549 	void			*data)
1550 {
1551 	return mount_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super);
1552 }
1553 
1554 static const struct super_operations xfs_super_operations = {
1555 	.alloc_inode		= xfs_fs_alloc_inode,
1556 	.destroy_inode		= xfs_fs_destroy_inode,
1557 	.dirty_inode		= xfs_fs_dirty_inode,
1558 	.write_inode		= xfs_fs_write_inode,
1559 	.evict_inode		= xfs_fs_evict_inode,
1560 	.put_super		= xfs_fs_put_super,
1561 	.sync_fs		= xfs_fs_sync_fs,
1562 	.freeze_fs		= xfs_fs_freeze,
1563 	.unfreeze_fs		= xfs_fs_unfreeze,
1564 	.statfs			= xfs_fs_statfs,
1565 	.remount_fs		= xfs_fs_remount,
1566 	.show_options		= xfs_fs_show_options,
1567 };
1568 
1569 static struct file_system_type xfs_fs_type = {
1570 	.owner			= THIS_MODULE,
1571 	.name			= "xfs",
1572 	.mount			= xfs_fs_mount,
1573 	.kill_sb		= kill_block_super,
1574 	.fs_flags		= FS_REQUIRES_DEV,
1575 };
1576 
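/*
 * Create the kmem zones (slab caches) and the ioend mempool used
 * throughout XFS, tearing down everything already created on failure.
 */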
1577 STATIC int __init
1578 xfs_init_zones(void)
1579 {
1580 
1581 	xfs_ioend_zone = kmem_zone_init(sizeof(xfs_ioend_t), "xfs_ioend");
1582 	if (!xfs_ioend_zone)
1583 		goto out;
1584 
1585 	xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE,
1586 						  xfs_ioend_zone);
1587 	if (!xfs_ioend_pool)
1588 		goto out_destroy_ioend_zone;
1589 
1590 	xfs_log_ticket_zone = kmem_zone_init(sizeof(xlog_ticket_t),
1591 						"xfs_log_ticket");
1592 	if (!xfs_log_ticket_zone)
1593 		goto out_destroy_ioend_pool;
1594 
1595 	xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
1596 						"xfs_bmap_free_item");
1597 	if (!xfs_bmap_free_item_zone)
1598 		goto out_destroy_log_ticket_zone;
1599 
1600 	xfs_btree_cur_zone = kmem_zone_init(sizeof(xfs_btree_cur_t),
1601 						"xfs_btree_cur");
1602 	if (!xfs_btree_cur_zone)
1603 		goto out_destroy_bmap_free_item_zone;
1604 
1605 	xfs_da_state_zone = kmem_zone_init(sizeof(xfs_da_state_t),
1606 						"xfs_da_state");
1607 	if (!xfs_da_state_zone)
1608 		goto out_destroy_btree_cur_zone;
1609 
1610 	xfs_dabuf_zone = kmem_zone_init(sizeof(xfs_dabuf_t), "xfs_dabuf");
1611 	if (!xfs_dabuf_zone)
1612 		goto out_destroy_da_state_zone;
1613 
1614 	xfs_ifork_zone = kmem_zone_init(sizeof(xfs_ifork_t), "xfs_ifork");
1615 	if (!xfs_ifork_zone)
1616 		goto out_destroy_dabuf_zone;
1617 
1618 	xfs_trans_zone = kmem_zone_init(sizeof(xfs_trans_t), "xfs_trans");
1619 	if (!xfs_trans_zone)
1620 		goto out_destroy_ifork_zone;
1621 
1622 	xfs_log_item_desc_zone =
1623 		kmem_zone_init(sizeof(struct xfs_log_item_desc),
1624 			       "xfs_log_item_desc");
1625 	if (!xfs_log_item_desc_zone)
1626 		goto out_destroy_trans_zone;
1627 
1628 	/*
1629 	 * The size of the zone allocated buf log item is the maximum
1630 	 * size possible under XFS.  This wastes a little bit of memory,
1631 	 * but it is much faster.
1632 	 */
1633 	xfs_buf_item_zone = kmem_zone_init((sizeof(xfs_buf_log_item_t) +
1634 				(((XFS_MAX_BLOCKSIZE / XFS_BLF_CHUNK) /
1635 				  NBWORD) * sizeof(int))), "xfs_buf_item");
1636 	if (!xfs_buf_item_zone)
1637 		goto out_destroy_log_item_desc_zone;
1638 
1639 	xfs_efd_zone = kmem_zone_init((sizeof(xfs_efd_log_item_t) +
1640 			((XFS_EFD_MAX_FAST_EXTENTS - 1) *
1641 				 sizeof(xfs_extent_t))), "xfs_efd_item");
1642 	if (!xfs_efd_zone)
1643 		goto out_destroy_buf_item_zone;
1644 
1645 	xfs_efi_zone = kmem_zone_init((sizeof(xfs_efi_log_item_t) +
1646 			((XFS_EFI_MAX_FAST_EXTENTS - 1) *
1647 				sizeof(xfs_extent_t))), "xfs_efi_item");
1648 	if (!xfs_efi_zone)
1649 		goto out_destroy_efd_zone;
1650 
1651 	xfs_inode_zone =
1652 		kmem_zone_init_flags(sizeof(xfs_inode_t), "xfs_inode",
1653 			KM_ZONE_HWALIGN | KM_ZONE_RECLAIM | KM_ZONE_SPREAD,
1654 			xfs_fs_inode_init_once);
1655 	if (!xfs_inode_zone)
1656 		goto out_destroy_efi_zone;
1657 
1658 	xfs_ili_zone =
1659 		kmem_zone_init_flags(sizeof(xfs_inode_log_item_t), "xfs_ili",
1660 					KM_ZONE_SPREAD, NULL);
1661 	if (!xfs_ili_zone)
1662 		goto out_destroy_inode_zone;
1663 
1664 	return 0;
1665 
1666  out_destroy_inode_zone:
1667 	kmem_zone_destroy(xfs_inode_zone);
1668  out_destroy_efi_zone:
1669 	kmem_zone_destroy(xfs_efi_zone);
1670  out_destroy_efd_zone:
1671 	kmem_zone_destroy(xfs_efd_zone);
1672  out_destroy_buf_item_zone:
1673 	kmem_zone_destroy(xfs_buf_item_zone);
1674  out_destroy_log_item_desc_zone:
1675 	kmem_zone_destroy(xfs_log_item_desc_zone);
1676  out_destroy_trans_zone:
1677 	kmem_zone_destroy(xfs_trans_zone);
1678  out_destroy_ifork_zone:
1679 	kmem_zone_destroy(xfs_ifork_zone);
1680  out_destroy_dabuf_zone:
1681 	kmem_zone_destroy(xfs_dabuf_zone);
1682  out_destroy_da_state_zone:
1683 	kmem_zone_destroy(xfs_da_state_zone);
1684  out_destroy_btree_cur_zone:
1685 	kmem_zone_destroy(xfs_btree_cur_zone);
1686  out_destroy_bmap_free_item_zone:
1687 	kmem_zone_destroy(xfs_bmap_free_item_zone);
1688  out_destroy_log_ticket_zone:
1689 	kmem_zone_destroy(xfs_log_ticket_zone);
1690  out_destroy_ioend_pool:
1691 	mempool_destroy(xfs_ioend_pool);
1692  out_destroy_ioend_zone:
1693 	kmem_zone_destroy(xfs_ioend_zone);
1694  out:
1695 	return -ENOMEM;
1696 }
1697 
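/*
 * Destroy the zones and the ioend mempool in the reverse order of
 * their creation.
 */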
1698 STATIC void
1699 xfs_destroy_zones(void)
1700 {
1701 	kmem_zone_destroy(xfs_ili_zone);
1702 	kmem_zone_destroy(xfs_inode_zone);
1703 	kmem_zone_destroy(xfs_efi_zone);
1704 	kmem_zone_destroy(xfs_efd_zone);
1705 	kmem_zone_destroy(xfs_buf_item_zone);
1706 	kmem_zone_destroy(xfs_log_item_desc_zone);
1707 	kmem_zone_destroy(xfs_trans_zone);
1708 	kmem_zone_destroy(xfs_ifork_zone);
1709 	kmem_zone_destroy(xfs_dabuf_zone);
1710 	kmem_zone_destroy(xfs_da_state_zone);
1711 	kmem_zone_destroy(xfs_btree_cur_zone);
1712 	kmem_zone_destroy(xfs_bmap_free_item_zone);
1713 	kmem_zone_destroy(xfs_log_ticket_zone);
1714 	mempool_destroy(xfs_ioend_pool);
1715 	kmem_zone_destroy(xfs_ioend_zone);
1716 
1717 }
1718 
1719 STATIC int __init
1720 xfs_init_workqueues(void)
1721 {
1722 	/*
1723 	 * max_active is set to 8 to give enough concurrency to allow
1724 	 * multiple work operations on each CPU to run. This allows multiple
1725 	 * filesystems to be running sync work concurrently, and scales with
1726 	 * the number of CPUs in the system.
1727 	 */
1728 	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
1729 	if (!xfs_syncd_wq)
1730 		goto out;
1731 
1732 	xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
1733 	if (!xfs_ail_wq)
1734 		goto out_destroy_syncd;
1735 
1736 	return 0;
1737 
1738 out_destroy_syncd:
1739 	destroy_workqueue(xfs_syncd_wq);
1740 out:
1741 	return -ENOMEM;
1742 }
1743 
1744 STATIC void
1745 xfs_destroy_workqueues(void)
1746 {
1747 	destroy_workqueue(xfs_ail_wq);
1748 	destroy_workqueue(xfs_syncd_wq);
1749 }
1750 
1751 STATIC int __init
1752 init_xfs_fs(void)
1753 {
1754 	int			error;
1755 
1756 	printk(KERN_INFO XFS_VERSION_STRING " with "
1757 			 XFS_BUILD_OPTIONS " enabled\n");
1758 
1759 	xfs_ioend_init();
1760 	xfs_dir_startup();
1761 
1762 	error = xfs_init_zones();
1763 	if (error)
1764 		goto out;
1765 
1766 	error = xfs_init_workqueues();
1767 	if (error)
1768 		goto out_destroy_zones;
1769 
1770 	error = xfs_mru_cache_init();
1771 	if (error)
1772 		goto out_destroy_wq;
1773 
1774 	error = xfs_filestream_init();
1775 	if (error)
1776 		goto out_mru_cache_uninit;
1777 
1778 	error = xfs_buf_init();
1779 	if (error)
1780 		goto out_filestream_uninit;
1781 
1782 	error = xfs_init_procfs();
1783 	if (error)
1784 		goto out_buf_terminate;
1785 
1786 	error = xfs_sysctl_register();
1787 	if (error)
1788 		goto out_cleanup_procfs;
1789 
1794 	vfs_initquota();
1795 
1796 	error = register_filesystem(&xfs_fs_type);
1797 	if (error)
1798 		goto out_sysctl_unregister;
1799 	return 0;
1800 
1801  out_sysctl_unregister:
1802 	xfs_sysctl_unregister();
1803  out_cleanup_procfs:
1804 	xfs_cleanup_procfs();
1805  out_buf_terminate:
1806 	xfs_buf_terminate();
1807  out_filestream_uninit:
1808 	xfs_filestream_uninit();
1809  out_mru_cache_uninit:
1810 	xfs_mru_cache_uninit();
1811  out_destroy_wq:
1812 	xfs_destroy_workqueues();
1813  out_destroy_zones:
1814 	xfs_destroy_zones();
1815  out:
1816 	return error;
1817 }
1818 
1819 STATIC void __exit
1820 exit_xfs_fs(void)
1821 {
1822 	vfs_exitquota();
1823 	unregister_filesystem(&xfs_fs_type);
1824 	xfs_sysctl_unregister();
1825 	xfs_cleanup_procfs();
1826 	xfs_buf_terminate();
1827 	xfs_filestream_uninit();
1828 	xfs_mru_cache_uninit();
1829 	xfs_destroy_workqueues();
1830 	xfs_destroy_zones();
1831 }
1832 
1833 module_init(init_xfs_fs);
1834 module_exit(exit_xfs_fs);
1835 
1836 MODULE_AUTHOR("Silicon Graphics, Inc.");
1837 MODULE_DESCRIPTION(XFS_VERSION_STRING " with " XFS_BUILD_OPTIONS " enabled");
1838 MODULE_LICENSE("GPL");
1839