/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_clnt.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_alloc.h"
#include "xfs_dmapi.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bit.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_rw.h"
#include "xfs_acl.h"
#include "xfs_cap.h"
#include "xfs_mac.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"

#include "xfs_qm.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
mutex_t		xfs_Gqm_lock;
struct xfs_qm	*xfs_Gqm;

kmem_zone_t	*qm_dqzone;
kmem_zone_t	*qm_dqtrxzone;
kmem_shaker_t	xfs_qm_shaker;
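
/*
 * Lifecycle sketch (derived from the code below, not a separate API
 * contract): xfs_Gqm is created lazily by the first
 * xfs_qm_hold_quotafs_ref() at mount time, and torn down again by
 * xfs_qm_rele_quotafs_ref() when the last quota-enabled filesystem
 * unmounts and qm_nrefs drops to zero.
 */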

STATIC void	xfs_qm_list_init(xfs_dqlist_t *, char *, int);
STATIC void	xfs_qm_list_destroy(xfs_dqlist_t *);

STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_shake(int, unsigned int);

#ifdef DEBUG
extern mutex_t	qcheck_lock;
#endif

#ifdef QUOTADEBUG
#define XQM_LIST_PRINT(l, NXT, title) \
{ \
	xfs_dquot_t	*dqp; int i = 0; \
	cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
	for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
		cmn_err(CE_DEBUG, "   %d.  \"%d (%s)\"   " \
				  "bcnt = %d, icnt = %d, refs = %d", \
			++i, (int) INT_GET(dqp->q_core.d_id, ARCH_CONVERT), \
			DQFLAGTO_TYPESTR(dqp), \
			(int) INT_GET(dqp->q_core.d_bcount, ARCH_CONVERT), \
			(int) INT_GET(dqp->q_core.d_icount, ARCH_CONVERT), \
			(int) dqp->q_nrefs);  } \
}
#else
#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
#endif
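
/*
 * Example (this mirrors the real call at the end of xfs_qm_quotacheck):
 *
 *	XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
 *
 * which, under QUOTADEBUG, dumps the id, type, block/inode counts and
 * refcount of every dquot on the per-mount list.
 */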

/*
 * Initialize the XQM structure.
 * Note that there is not one quota manager per file system.
 */
STATIC struct xfs_qm *
xfs_Gqm_init(void)
{
	xfs_qm_t		*xqm;
	int			hsize, i;

	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
	ASSERT(xqm);

	/*
	 * Initialize the dquot hash tables.
	 */
	hsize = (DQUOT_HASH_HEURISTIC < XFS_QM_NCSIZE_THRESHOLD) ?
		XFS_QM_HASHSIZE_LOW : XFS_QM_HASHSIZE_HIGH;
	xqm->qm_dqhashmask = hsize - 1;
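	/*
	 * Note: hsize - 1 works as a hash mask only if hsize is a power
	 * of two; XFS_QM_HASHSIZE_LOW/HIGH are assumed to be powers of
	 * two here, so a dquot id can be hashed to a bucket with a
	 * simple (hashval & qm_dqhashmask).
	 */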

	xqm->qm_usr_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize *
						      sizeof(xfs_dqhash_t),
						      KM_SLEEP);
	xqm->qm_grp_dqhtable = (xfs_dqhash_t *)kmem_zalloc(hsize *
						      sizeof(xfs_dqhash_t),
						      KM_SLEEP);
	ASSERT(xqm->qm_usr_dqhtable != NULL);
	ASSERT(xqm->qm_grp_dqhtable != NULL);

	for (i = 0; i < hsize; i++) {
		xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
		xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
	}

	/*
	 * Freelist of all dquots of all file systems
	 */
	xfs_qm_freelist_init(&(xqm->qm_dqfreelist));

	/*
	 * dquot zone. we register our own low-memory callback.
	 */
	if (!qm_dqzone) {
		xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
						"xfs_dquots");
		qm_dqzone = xqm->qm_dqzone;
	} else
		xqm->qm_dqzone = qm_dqzone;

	xfs_qm_shaker = kmem_shake_register(xfs_qm_shake);
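	/*
	 * From this point on the VM may invoke xfs_qm_shake() under
	 * memory pressure; it is expected to trim dquots off the
	 * freelist initialized above (this is the "low-memory
	 * callback" mentioned in the comment on the dquot zone).
	 */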

	/*
	 * The t_dqinfo portion of transactions.
	 */
	if (!qm_dqtrxzone) {
		xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
						   "xfs_dqtrx");
		qm_dqtrxzone = xqm->qm_dqtrxzone;
	} else
		xqm->qm_dqtrxzone = qm_dqtrxzone;

	atomic_set(&xqm->qm_totaldquots, 0);
	xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
	xqm->qm_nrefs = 0;
#ifdef DEBUG
	mutex_init(&qcheck_lock, MUTEX_DEFAULT, "qchk");
#endif
	return xqm;
}

/*
 * Destroy the global quota manager when its reference count goes to zero.
 */
void
xfs_qm_destroy(
	struct xfs_qm	*xqm)
{
	int		hsize, i;

	ASSERT(xqm != NULL);
	ASSERT(xqm->qm_nrefs == 0);
	kmem_shake_deregister(xfs_qm_shaker);
	hsize = xqm->qm_dqhashmask + 1;
	for (i = 0; i < hsize; i++) {
		xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
		xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
	}
	kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t));
	kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t));
	xqm->qm_usr_dqhtable = NULL;
	xqm->qm_grp_dqhtable = NULL;
	xqm->qm_dqhashmask = 0;
	xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
#ifdef DEBUG
	mutex_destroy(&qcheck_lock);
#endif
	kmem_free(xqm, sizeof(xfs_qm_t));
}

/*
 * Called at mount time to let XQM know that another file system is
 * starting quotas. This isn't crucial information as the individual mount
 * structures are pretty independent, but it helps the XQM keep a
 * global view of what's going on.
 */
/* ARGSUSED */
STATIC int
xfs_qm_hold_quotafs_ref(
	struct xfs_mount *mp)
{
	/*
	 * Need to lock the xfs_Gqm structure for things like this. For example,
	 * the structure could disappear between the entry to this routine and
	 * a HOLD operation if not locked.
	 */
	XFS_QM_LOCK(xfs_Gqm);
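	/*
	 * Note: XFS_QM_LOCK(xfs_Gqm) is assumed to expand to a lock on
	 * the global xfs_Gqm_lock rather than a dereference of the
	 * xfs_Gqm pointer, which is why taking it before the NULL
	 * check below is safe.
	 */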

	if (xfs_Gqm == NULL)
		xfs_Gqm = xfs_Gqm_init();
	/*
	 * We can keep a list of all filesystems with quotas mounted for
	 * debugging and statistical purposes, but ...
	 * Just take a reference and get out.
	 */
	XFS_QM_HOLD(xfs_Gqm);
	XFS_QM_UNLOCK(xfs_Gqm);

	return 0;
}


/*
 * Release the reference that a filesystem took at mount time,
 * so that we know when we need to destroy the entire quota manager.
 */
/* ARGSUSED */
STATIC void
xfs_qm_rele_quotafs_ref(
	struct xfs_mount *mp)
{
	xfs_dquot_t	*dqp, *nextdqp;

	ASSERT(xfs_Gqm);
	ASSERT(xfs_Gqm->qm_nrefs > 0);

	/*
	 * Go thru the freelist and destroy all inactive dquots.
	 */
	xfs_qm_freelist_lock(xfs_Gqm);

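	/*
	 * The freelist is circular and the list head itself acts as
	 * the sentinel, hence the cast-compare against the address of
	 * qm_dqfreelist as the loop termination condition below.
	 */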
	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
	     dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
		xfs_dqlock(dqp);
		nextdqp = dqp->dq_flnext;
		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
			ASSERT(dqp->q_mount == NULL);
			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
			ASSERT(dqp->HL_PREVP == NULL);
			ASSERT(dqp->MPL_PREVP == NULL);
			XQM_FREELIST_REMOVE(dqp);
			xfs_dqunlock(dqp);
			xfs_qm_dqdestroy(dqp);
		} else {
			xfs_dqunlock(dqp);
		}
		dqp = nextdqp;
	}
	xfs_qm_freelist_unlock(xfs_Gqm);

	/*
	 * Destroy the entire XQM. If somebody mounts with quotaon, this'll
	 * be restarted.
	 */
	XFS_QM_LOCK(xfs_Gqm);
	XFS_QM_RELE(xfs_Gqm);
	if (xfs_Gqm->qm_nrefs == 0) {
		xfs_qm_destroy(xfs_Gqm);
		xfs_Gqm = NULL;
	}
	XFS_QM_UNLOCK(xfs_Gqm);
}

/*
 * This is called at mount time from xfs_mountfs to initialize the quotainfo
 * structure and start the global quota manager (xfs_Gqm) if it hasn't done
 * so already.  Note that the superblock has not been read in yet.
 */
void
xfs_qm_mount_quotainit(
	xfs_mount_t	*mp,
	uint		flags)
{
	/*
	 * User or group quotas have to be on.
	 */
	ASSERT(flags & (XFSMNT_UQUOTA | XFSMNT_GQUOTA));

	/*
	 * Initialize the flags in the mount structure. From this point
	 * onwards we look at m_qflags to figure out if quotas are ON/OFF,
	 * etc.  Note that we enforce nothing if accounting is off.
	 * i.e. XFSMNT_*QUOTA must be ON for XFSMNT_*QUOTAENF.
	 * It isn't necessary to take the quotaoff lock to do this; this is
	 * called from mount.
	 */
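	/*
	 * For illustration only (the mount-option mapping is an
	 * assumption here, not something this file defines): an
	 * "-o uquota" mount is expected to arrive as
	 * XFSMNT_UQUOTA|XFSMNT_UQUOTAENF, while "-o uqnoenforce"
	 * arrives as XFSMNT_UQUOTA alone, yielding accounting without
	 * enforcement below.
	 */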
	if (flags & XFSMNT_UQUOTA) {
		mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
		if (flags & XFSMNT_UQUOTAENF)
			mp->m_qflags |= XFS_UQUOTA_ENFD;
	}
	if (flags & XFSMNT_GQUOTA) {
		mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
		if (flags & XFSMNT_GQUOTAENF)
			mp->m_qflags |= XFS_GQUOTA_ENFD;
	}
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount_quotadestroy(
	xfs_mount_t	*mp)
{
	if (mp->m_quotainfo)
		xfs_qm_destroy_quotainfo(mp);
}


/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 */
int
xfs_qm_mount_quotas(
	xfs_mount_t	*mp,
	int		mfsi_flags)
{
	unsigned long	s;
	int		error = 0;
	uint		sbf;

	/*
	 * If a file system had quotas running earlier, but decided to
	 * mount without -o quota/uquota/gquota options, revoke the
	 * quotachecked license, and bail out.
	 */
	if (! XFS_IS_QUOTA_ON(mp) &&
	    (mp->m_sb.sb_qflags & (XFS_UQUOTA_ACCT|XFS_GQUOTA_ACCT))) {
		mp->m_qflags = 0;
		goto write_changes;
	}

	/*
	 * Quotas on realtime volumes are not supported, so disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		cmn_err(CE_NOTE,
			"Cannot turn on quotas for realtime filesystem %s",
			mp->m_fsname);
		mp->m_qflags = 0;
		goto write_changes;
	}

#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
	cmn_err(CE_NOTE, "Attempting to turn on disk quotas.");
#endif

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	if ((error = xfs_qm_init_quotainfo(mp))) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp) &&
		!(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
#ifdef DEBUG
		cmn_err(CE_NOTE, "Doing a quotacheck. Please wait.");
#endif
		if ((error = xfs_qm_quotacheck(mp))) {
			/* Quotacheck has failed and quotas have
			 * been disabled.
			 */
			return XFS_ERROR(error);
		}
#ifdef DEBUG
		cmn_err(CE_NOTE, "Done quotacheck.");
#endif
	}
 write_changes:
	/*
	 * We actually don't have to acquire the SB_LOCK at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	s = XFS_SB_LOCK(mp);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	XFS_SB_UNLOCK(mp, s);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on-disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_fs_cmn_err(CE_ALERT, mp,
				"XFS mount_quotas: Superblock update failed!");
		}
	}

	if (error) {
		xfs_fs_cmn_err(CE_WARN, mp,
			"Failed to initialize disk quotas.");
	}
	return XFS_ERROR(error);
}

/*
 * Called from the vfsops layer.
 */
int
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uqp, *gqp;
	int		error = 0;

	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Flush out the quota inodes.
	 */
	uqp = gqp = NULL;
	if (mp->m_quotainfo) {
		if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) {
			xfs_ilock(uqp, XFS_ILOCK_EXCL);
			xfs_iflock(uqp);
			error = xfs_iflush(uqp, XFS_IFLUSH_SYNC);
			xfs_iunlock(uqp, XFS_ILOCK_EXCL);
			if (unlikely(error == EFSCORRUPTED)) {
				XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)",
						 XFS_ERRLEVEL_LOW, mp);
				goto out;
			}
		}
		if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) {
			xfs_ilock(gqp, XFS_ILOCK_EXCL);
			xfs_iflock(gqp);
			error = xfs_iflush(gqp, XFS_IFLUSH_SYNC);
			xfs_iunlock(gqp, XFS_ILOCK_EXCL);
			if (unlikely(error == EFSCORRUPTED)) {
				XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)",
						 XFS_ERRLEVEL_LOW, mp);
				goto out;
			}
		}
	}
	if (uqp) {
		XFS_PURGE_INODE(uqp);
		mp->m_quotainfo->qi_uquotaip = NULL;
	}
	if (gqp) {
		XFS_PURGE_INODE(gqp);
		mp->m_quotainfo->qi_gquotaip = NULL;
	}
out:
	return XFS_ERROR(error);
}

/*
 * Flush all dquots of the given file system to disk. The dquots are
 * _not_ purged from memory here, just their data written to disk.
 */
int
xfs_qm_dqflush_all(
	xfs_mount_t	*mp,
	int		flags)
{
	int		recl;
	xfs_dquot_t	*dqp;
	int		niters;
	int		error;

	if (mp->m_quotainfo == NULL)
		return (0);
	niters = 0;
again:
	xfs_qm_mplist_lock(mp);
	FOREACH_DQUOT_IN_MP(dqp, mp) {
		xfs_dqlock(dqp);
		if (! XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}
		xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
		/* XXX a sentinel would be better */
		recl = XFS_QI_MPLRECLAIMS(mp);
		if (! xfs_qm_dqflock_nowait(dqp)) {
			/*
			 * If we can't grab the flush lock then check
			 * to see if the dquot has been flushed as a
			 * delayed write.  If so, grab its buffer and send it
			 * out immediately.  We'll be able to acquire
			 * the flush lock when the I/O completes.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write.
		 */
		xfs_qm_mplist_unlock(mp);
		error = xfs_qm_dqflush(dqp, flags);
		xfs_dqunlock(dqp);
		if (error)
			return (error);

		xfs_qm_mplist_lock(mp);
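		/*
		 * The reclaims counter acts as a poor man's list
		 * generation number: if it changed while we had the
		 * mplist lock dropped for the flush, our list position
		 * may be stale, so rescan from the start.
		 */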
		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
			xfs_qm_mplist_unlock(mp);
			/* XXX restart limit */
			goto again;
		}
	}

	xfs_qm_mplist_unlock(mp);
	/* return ! busy */
	return (0);
}

/*
 * Release the group dquot pointers the user dquots may be
 * carrying around as a hint. mplist is locked on entry and exit.
 */
STATIC void
xfs_qm_detach_gdquots(
	xfs_mount_t	*mp)
{
	xfs_dquot_t	*dqp, *gdqp;
	int		nrecl;

 again:
	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	dqp = XFS_QI_MPLNEXT(mp);
	while (dqp) {
		xfs_dqlock(dqp);
		if ((gdqp = dqp->q_gdquot)) {
			xfs_dqlock(gdqp);
			dqp->q_gdquot = NULL;
		}
		xfs_dqunlock(dqp);

		if (gdqp) {
			/*
			 * Can't hold the mplist lock across a dqput.
			 * XXX must convert to marker-based iterations here.
			 */
			nrecl = XFS_QI_MPLRECLAIMS(mp);
			xfs_qm_mplist_unlock(mp);
			xfs_qm_dqput(gdqp);

			xfs_qm_mplist_lock(mp);
			if (nrecl != XFS_QI_MPLRECLAIMS(mp))
				goto again;
		}
		dqp = dqp->MPL_NEXT;
	}
}

/*
 * Go through all the incore dquots of this file system and take them
 * off the mplist and hashlist, if the dquot type matches the dqtype
 * parameter. This is used when turning off quota accounting for
 * users and/or groups, as well as when the filesystem is unmounting.
 */
STATIC int
xfs_qm_dqpurge_int(
	xfs_mount_t	*mp,
	uint		flags) /* QUOTAOFF/UMOUNTING/UQUOTA/GQUOTA */
{
	xfs_dquot_t	*dqp;
	uint		dqtype;
	int		nrecl;
	xfs_dquot_t	*nextdqp;
	int		nmisses;

	if (mp->m_quotainfo == NULL)
		return (0);

	dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
	dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;

	xfs_qm_mplist_lock(mp);

	/*
	 * In the first pass through all incore dquots of this filesystem,
	 * we release the group dquot pointers the user dquots may be
	 * carrying around as a hint. We need to do this irrespective of
	 * what's being turned off.
	 */
	xfs_qm_detach_gdquots(mp);

 again:
	nmisses = 0;
	ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
	/*
	 * Try to get rid of all of the unwanted dquots. The idea is to
	 * get them off mplist and hashlist, but leave them on freelist.
	 */
	dqp = XFS_QI_MPLNEXT(mp);
	while (dqp) {
		/*
		 * It's OK to look at the type without taking dqlock here.
		 * We're holding the mplist lock here, and that's needed for
		 * a dqreclaim.
		 */
		if ((dqp->dq_flags & dqtype) == 0) {
			dqp = dqp->MPL_NEXT;
			continue;
		}

		if (! xfs_qm_dqhashlock_nowait(dqp)) {
			nrecl = XFS_QI_MPLRECLAIMS(mp);
			xfs_qm_mplist_unlock(mp);
			XFS_DQ_HASH_LOCK(dqp->q_hash);
			xfs_qm_mplist_lock(mp);

			/*
			 * XXX Theoretically, we can get into a very long
			 * ping pong game here.
			 * No one can be adding dquots to the mplist at
			 * this point, but somebody might be taking things off.
			 */
			if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
				XFS_DQ_HASH_UNLOCK(dqp->q_hash);
				goto again;
			}
		}

		/*
		 * Take the dquot off the mplist and hashlist. It may remain on
		 * freelist in INACTIVE state.
		 */
		nextdqp = dqp->MPL_NEXT;
		nmisses += xfs_qm_dqpurge(dqp, flags);
		dqp = nextdqp;
	}
	xfs_qm_mplist_unlock(mp);
	return nmisses;
}

int
xfs_qm_dqpurge_all(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		ndquots;

	/*
	 * Purge the dquot cache.
	 * None of the dquots should really be busy at this point.
	 */
	if (mp->m_quotainfo) {
		while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
			delay(ndquots * 10);
		}
	}
	return 0;
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	uint		dolock,
	xfs_dquot_t	*udqhint, /* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	error = 0;
	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	if ((dqp = *IO_idqpp)) {
		if (dolock)
			xfs_dqlock(dqp);
		xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
		goto done;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is XFS_DQ_GROUP. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	ASSERT(!udqhint || type == XFS_DQ_GROUP);
	if (udqhint && !dolock)
		xfs_dqlock(udqhint);

	/*
	 * No need to take dqlock to look at the id.
	 * The ID can't change until it gets reclaimed, and it won't
	 * be reclaimed as long as we have a ref from inode and we hold
	 * the ilock.
	 */
	if (udqhint &&
	    (dqp = udqhint->q_gdquot) &&
	    (INT_GET(dqp->q_core.d_id, ARCH_CONVERT) == id)) {
		ASSERT(XFS_DQ_IS_LOCKED(udqhint));
		xfs_dqlock(dqp);
		XFS_DQHOLD(dqp);
		ASSERT(*IO_idqpp == NULL);
		*IO_idqpp = dqp;
		if (!dolock) {
			xfs_dqunlock(dqp);
			xfs_dqunlock(udqhint);
		}
		goto done;
	}
	/*
	 * We can't hold a dquot lock when we call the dqget code.
	 * We'll deadlock in no time, because of (not conforming to)
	 * lock ordering - the inode lock comes before any dquot lock,
	 * and we may drop and reacquire the ilock in xfs_qm_dqget().
	 */
	if (udqhint)
		xfs_dqunlock(udqhint);
	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
				 doalloc|XFS_QMOPT_DOWARN, &dqp))) {
		if (udqhint && dolock)
			xfs_dqlock(udqhint);
		goto done;
	}

	xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	ASSERT(dqp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (! dolock) {
		xfs_dqunlock(dqp);
		goto done;
	}
	if (! udqhint)
		goto done;

	ASSERT(udqhint);
	ASSERT(dolock);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (! xfs_qm_dqlock_nowait(udqhint)) {
		xfs_dqunlock(dqp);
		xfs_dqlock(udqhint);
		xfs_dqlock(dqp);
	}
 done:
#ifdef QUOTADEBUG
	if (udqhint) {
		if (dolock)
			ASSERT(XFS_DQ_IS_LOCKED(udqhint));
	}
	if (! error) {
		if (dolock)
			ASSERT(XFS_DQ_IS_LOCKED(dqp));
	}
#endif
	return (error);
}


/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups. The idea sounds simple, but the
 * execution isn't, because the udquot might have a group dquot attached
 * already and getting rid of that gets us into lock ordering constraints.
 * The process is complicated more by the fact that the dquots may or may not
 * be locked on entry.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq,
	uint		locked)
{
	xfs_dquot_t	*tmp;

#ifdef QUOTADEBUG
	if (locked) {
		ASSERT(XFS_DQ_IS_LOCKED(udq));
		ASSERT(XFS_DQ_IS_LOCKED(gdq));
	}
#endif
	if (! locked)
		xfs_dqlock(udq);

	if ((tmp = udq->q_gdquot)) {
		if (tmp == gdq) {
			if (! locked)
				xfs_dqunlock(udq);
			return;
		}

		udq->q_gdquot = NULL;
		/*
		 * We can't keep any dqlocks when calling dqrele,
		 * because the freelist lock comes before dqlocks.
		 */
		xfs_dqunlock(udq);
		if (locked)
			xfs_dqunlock(gdq);
		/*
		 * We took a hard reference once upon a time in dqget,
		 * so give it back when the udquot no longer points at it.
		 * dqput() does the unlocking of the dquot.
		 */
		xfs_qm_dqrele(tmp);

		xfs_dqlock(udq);
		xfs_dqlock(gdq);

	} else {
		ASSERT(XFS_DQ_IS_LOCKED(udq));
		if (! locked) {
			xfs_dqlock(gdq);
		}
	}

	ASSERT(XFS_DQ_IS_LOCKED(udq));
	ASSERT(XFS_DQ_IS_LOCKED(gdq));
	/*
	 * Somebody could have attached a gdquot here,
	 * when we dropped the uqlock. If so, just do nothing.
	 */
	if (udq->q_gdquot == NULL) {
		XFS_DQHOLD(gdq);
		udq->q_gdquot = gdq;
	}
	if (! locked) {
		xfs_dqunlock(gdq);
		xfs_dqunlock(udq);
	}
}

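/*
 * A minimal usage sketch for the routine below (the call site is an
 * assumption for illustration; real callers live outside this file):
 * before reserving quota against an inode in a transaction, a caller
 * would typically do
 *
 *	error = xfs_qm_dqattach(ip, 0);
 *
 * with the inode unlocked, letting xfs_qm_dqattach take and drop the
 * ilock itself.
 */
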
/*
 * Given a locked inode, attach dquot(s) to it, taking UQUOTAON / GQUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option
 * pretty much made this code a complete mess, but it has been pretty useful.
 * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if ((! XFS_IS_QUOTA_ON(mp)) ||
	    (! XFS_NOT_DQATTACHED(mp, ip)) ||
	    (ip->i_ino == mp->m_sb.sb_uquotino) ||
	    (ip->i_ino == mp->m_sb.sb_gquotino))
		return (0);

	ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
	       XFS_ISLOCKED_INODE_EXCL(ip));

	if (! (flags & XFS_QMOPT_ILOCKED))
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						flags & XFS_QMOPT_DQLOCK,
						NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}
	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						flags & XFS_QMOPT_DQLOCK,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We may or may not have the i_udquot locked at this point,
		 * but this check is OK since we don't depend on the i_gdquot to
		 * be accurate 100% all the time. It is just a hint, and this
		 * will succeed in general.
		 */
		if (ip->i_udquot->q_gdquot == ip->i_gdquot)
			goto done;
		/*
		 * Attach i_gdquot to the gdquot hint inside the i_udquot.
		 */
		xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
					 flags & XFS_QMOPT_DQLOCK);
	}

 done:

#ifdef QUOTADEBUG
	if (! error) {
		if (ip->i_udquot) {
			if (flags & XFS_QMOPT_DQLOCK)
				ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
		}
		if (ip->i_gdquot) {
			if (flags & XFS_QMOPT_DQLOCK)
				ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
		}
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_GQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
#endif

	if (! (flags & XFS_QMOPT_ILOCKED))
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

#ifdef QUOTADEBUG
	else
		ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
#endif
	return (error);
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot))
		return;

	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
	if (ip->i_udquot)
		xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
}

/*
 * This is called by VFS_SYNC, and the flags arg determines the caller,
 * and its motives, as done in xfs_sync.
 *
 * vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
 * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI 0x25
 * umountroot: SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA
 */

int
xfs_qm_sync(
	xfs_mount_t	*mp,
	short		flags)
{
	int		recl, restarts;
	xfs_dquot_t	*dqp;
	uint		flush_flags;
	boolean_t	nowait;
	int		error;

	restarts = 0;
	/*
	 * We won't block unless we are asked to.
	 */
	nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);

 again:
	xfs_qm_mplist_lock(mp);
	/*
	 * dqpurge_all() also takes the mplist lock and iterates thru all
	 * dquots in quotaoff. However, if the QUOTA_ACTIVE bits are not
	 * cleared when we have the mplist lock, we know that dquots will be
	 * consistent as long as we have it locked.
	 */
	if (! XFS_IS_QUOTA_ON(mp)) {
		xfs_qm_mplist_unlock(mp);
		return (0);
	}
	FOREACH_DQUOT_IN_MP(dqp, mp) {
		/*
		 * If this is vfs_sync calling, then skip the dquots that
		 * don't 'seem' to be dirty. i.e. don't acquire dqlock.
		 * This is very similar to what xfs_sync does with inodes.
		 */
		if (flags & SYNC_BDFLUSH) {
			if (! XFS_DQ_IS_DIRTY(dqp))
				continue;
		}

		if (nowait) {
			/*
			 * Try to acquire the dquot lock. We are NOT out of
			 * lock order, but we just don't want to wait for this
			 * lock, unless somebody wanted us to.
			 */
			if (! xfs_qm_dqlock_nowait(dqp))
				continue;
		} else {
			xfs_dqlock(dqp);
		}

		/*
		 * Now, find out for sure if this dquot is dirty or not.
		 */
		if (! XFS_DQ_IS_DIRTY(dqp)) {
			xfs_dqunlock(dqp);
			continue;
		}

		/* XXX a sentinel would be better */
		recl = XFS_QI_MPLRECLAIMS(mp);
		if (! xfs_qm_dqflock_nowait(dqp)) {
			if (nowait) {
				xfs_dqunlock(dqp);
				continue;
			}
			/*
			 * If we can't grab the flush lock, then if the caller
			 * really wanted us to give this our best shot, see
			 * if we can give a push to the buffer before we wait
			 * on the flush lock. At this point, we know that
			 * even though the dquot is being flushed,
			 * it has (new) dirty data.
			 */
			xfs_qm_dqflock_pushbuf_wait(dqp);
		}
		/*
		 * Let go of the mplist lock. We don't want to hold it
		 * across a disk write.
		 */
		flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
		xfs_qm_mplist_unlock(mp);
		xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
		error = xfs_qm_dqflush(dqp, flush_flags);
		xfs_dqunlock(dqp);
		if (error && XFS_FORCED_SHUTDOWN(mp))
			return (0);	/* Need to prevent umount failure */
		else if (error)
			return (error);

		xfs_qm_mplist_lock(mp);
		if (recl != XFS_QI_MPLRECLAIMS(mp)) {
			if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
				break;

			xfs_qm_mplist_unlock(mp);
			goto again;
		}
	}

	xfs_qm_mplist_unlock(mp);
	return (0);
}


/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Tell XQM that we exist as soon as possible.
	 */
	if ((error = xfs_qm_hold_quotafs_ref(mp))) {
		return (error);
	}

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf, sizeof(xfs_quotainfo_t));
		mp->m_quotainfo = NULL;
		return (error);
	}

	spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin");
	xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
	qinf->qi_dqreclaims = 0;

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock, MUTEX_DEFAULT, "qoff");

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
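	/*
	 * Worked example (sizes are illustrative, not normative): with
	 * 4K filesystem blocks and a one-block dquot cluster,
	 * qi_dqchunklen is 8 basic blocks and qi_dqperchunk works out
	 * to 4096 / sizeof(xfs_dqblk_t) dquots per chunk.
	 */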

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we fall back to the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 */
	error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
			     (XFS_IS_UQUOTA_RUNNING(mp)) ?
			     XFS_DQ_USER : XFS_DQ_GROUP,
			     XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
			     &dqp);
	if (! error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can no longer write.
		 * If it is zero, a default is used.
		 */
		qinf->qi_btimelimit =
				INT_GET(ddqp->d_btimer, ARCH_CONVERT) ?
				INT_GET(ddqp->d_btimer, ARCH_CONVERT) :
				XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit =
				INT_GET(ddqp->d_itimer, ARCH_CONVERT) ?
				INT_GET(ddqp->d_itimer, ARCH_CONVERT) :
				XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit =
				INT_GET(ddqp->d_rtbtimer, ARCH_CONVERT) ?
				INT_GET(ddqp->d_rtbtimer, ARCH_CONVERT) :
				XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit =
				INT_GET(ddqp->d_bwarns, ARCH_CONVERT) ?
				INT_GET(ddqp->d_bwarns, ARCH_CONVERT) :
				XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit =
				INT_GET(ddqp->d_iwarns, ARCH_CONVERT) ?
				INT_GET(ddqp->d_iwarns, ARCH_CONVERT) :
				XFS_QM_IWARNLIMIT;
		qinf->qi_bhardlimit =
				INT_GET(ddqp->d_blk_hardlimit, ARCH_CONVERT);
		qinf->qi_bsoftlimit =
				INT_GET(ddqp->d_blk_softlimit, ARCH_CONVERT);
		qinf->qi_ihardlimit =
				INT_GET(ddqp->d_ino_hardlimit, ARCH_CONVERT);
		qinf->qi_isoftlimit =
				INT_GET(ddqp->d_ino_softlimit, ARCH_CONVERT);
		qinf->qi_rtbhardlimit =
				INT_GET(ddqp->d_rtb_hardlimit, ARCH_CONVERT);
		qinf->qi_rtbsoftlimit =
				INT_GET(ddqp->d_rtb_softlimit, ARCH_CONVERT);

		/*
		 * We sent the XFS_QMOPT_DQSUSER flag to dqget because
		 * we don't want this dquot cached. We haven't done a
		 * quotacheck yet, and quotacheck doesn't like incore dquots.
		 */
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
	}

	return (0);
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);
	ASSERT(xfs_Gqm != NULL);

	/*
	 * Release the reference that XQM kept, so that we know
	 * when the XQM structure should be freed. We cannot assume
	 * that xfs_Gqm is non-null after this point.
	 */
	xfs_qm_rele_quotafs_ref(mp);

	spinlock_destroy(&qi->qi_pinlock);
	xfs_qm_list_destroy(&qi->qi_dqlist);

	if (qi->qi_uquotaip) {
		XFS_PURGE_INODE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		XFS_PURGE_INODE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi, sizeof(xfs_quotainfo_t));
	mp->m_quotainfo = NULL;
}


/* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */

/* ARGSUSED */
STATIC void
xfs_qm_list_init(
	xfs_dqlist_t	*list,
	char		*str,
	int		n)
{
	mutex_init(&list->qh_lock, MUTEX_DEFAULT, str);
	list->qh_next = NULL;
	list->qh_version = 0;
	list->qh_nelems = 0;
}

STATIC void
xfs_qm_list_destroy(
	xfs_dqlist_t	*list)
{
	mutex_destroy(&(list->qh_lock));
}

/*
 * Stripped down version of dqattach. This doesn't attach, or even look at the
 * dquots attached to the inode. The rationale is that there won't be any
 * attached at the time this is called from quotacheck.
 */
STATIC int
xfs_qm_dqget_noattach(
	xfs_inode_t	*ip,
	xfs_dquot_t	**O_udqpp,
	xfs_dquot_t	**O_gdqpp)
{
	int		error;
	xfs_mount_t	*mp;
	xfs_dquot_t	*udqp, *gdqp;

	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
	mp = ip->i_mount;
	udqp = NULL;
	gdqp = NULL;

	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		/*
		 * We want the dquot allocated if it doesn't exist.
		 */
		if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
					 XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
					 &udqp))) {
			/*
			 * Shouldn't be able to turn off quotas here.
			 */
			ASSERT(error != ESRCH);
			ASSERT(error != ENOENT);
			return (error);
		}
		ASSERT(udqp);
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		if (udqp)
			xfs_dqunlock(udqp);
		if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_gid, XFS_DQ_GROUP,
					 XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
					 &gdqp))) {
			if (udqp)
				xfs_qm_dqrele(udqp);
			ASSERT(error != ESRCH);
			ASSERT(error != ENOENT);
			return (error);
		}
		ASSERT(gdqp);

		/* Reacquire the locks in the right order */
		if (udqp) {
			if (! xfs_qm_dqlock_nowait(udqp)) {
				xfs_dqunlock(gdqp);
				xfs_dqlock(udqp);
				xfs_dqlock(gdqp);
			}
		}
	}

	*O_udqpp = udqp;
	*O_gdqpp = gdqp;

#ifdef QUOTADEBUG
	if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
	if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
#endif
	return (0);
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	unsigned long	s;
	cred_t		zerocr;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return (error);
	}
	memset(&zerocr, 0, sizeof(zerocr));

	if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, S_IFREG, 1, 0,
				   &zerocr, 0, 1, ip, &committed))) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return (error);
	}

	/*
	 * Keep an extra reference to this quota inode. This inode is
	 * locked exclusively and joined to the transaction already.
	 */
	ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
	VN_HOLD(XFS_ITOV((*ip)));

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	s = XFS_SB_LOCK(mp);
	if (flags & XFS_QMOPT_SBVERSION) {
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		unsigned oldv = mp->m_sb.sb_versionnum;
#endif
		ASSERT(!XFS_SB_VERSION_HASQUOTA(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		XFS_SB_VERSION_ADDQUOTA(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
		cmn_err(CE_NOTE,
			"Old superblock version %x, converting to %x.",
			oldv, mp->m_sb.sb_versionnum);
#endif
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	XFS_SB_UNLOCK(mp, s);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES,
				     NULL))) {
		xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
		return (error);
	}
	return (0);
}


STATIC int
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_disk_dquot_t	*ddq;
	int			j;

	xfs_buftrace("RESET DQUOTS", bp);
	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(XFS_QM_DQPERBLK(mp) == j);
#endif
	ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
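	/*
	 * The buffer holds XFS_QM_DQPERBLK(mp) consecutive xfs_dqblk_t
	 * records; note that the loop below advances ddq by a full
	 * xfs_dqblk_t, not a bare xfs_disk_dquot_t, per iteration.
	 */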
	for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialized dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		INT_SET(ddq->d_bcount, ARCH_CONVERT, 0ULL);
		INT_SET(ddq->d_icount, ARCH_CONVERT, 0ULL);
		INT_SET(ddq->d_rtbcount, ARCH_CONVERT, 0ULL);
		INT_SET(ddq->d_btimer, ARCH_CONVERT, (time_t)0);
		INT_SET(ddq->d_itimer, ARCH_CONVERT, (time_t)0);
		INT_SET(ddq->d_bwarns, ARCH_CONVERT, 0UL);
		INT_SET(ddq->d_iwarns, ARCH_CONVERT, 0UL);
		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
	}

	return (0);
}

STATIC int
xfs_qm_dqiter_bufs(
	xfs_mount_t	*mp,
	xfs_dqid_t	firstid,
	xfs_fsblock_t	bno,
	xfs_filblks_t	blkcnt,
	uint		flags)
{
	xfs_buf_t	*bp;
	int		error;
	int		notcommitted;
	int		incr;

	ASSERT(blkcnt > 0);
	notcommitted = 0;
	incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
		XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
	error = 0;

	/*
	 * The blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
		if (error)
			break;

		(void) xfs_qm_reset_dqcounts(mp, bp, firstid,
					     flags & XFS_QMOPT_UQUOTA ?
					     XFS_DQ_USER : XFS_DQ_GROUP);
		xfs_bdwrite(mp, bp);
		/*
		 * Go to the next block.
		 */
		bno++;
		firstid += XFS_QM_DQPERBLK(mp);
	}
	return (error);
}

/*
 * Iterate over all allocated USR/GRP dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	xfs_mount_t	*mp,
	xfs_inode_t	*qip,
	uint		flags)
{
	xfs_bmbt_irec_t		*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return (0);

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi(NULL, qip, lblkno,
				  maxlblkcnt - lblkno,
				  XFS_BMAPI_METADATA,
				  NULL,
				  0, map, &nmaps, NULL);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);

			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				XFS_QM_DQPERBLK(mp);
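			/*
			 * Dquots live at fixed offsets in the quota
			 * inode, so the id of the first dquot in this
			 * extent is the extent's starting file offset
			 * times the number of dquots per block.
			 */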
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_baread(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       (int)XFS_QI_DQCHUNKLEN(mp));
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			if ((error = xfs_qm_dqiter_bufs(mp,
						       firstid,
						       map[i].br_startblock,
						       map[i].br_blockcount,
						       flags))) {
				break;
			}
		}

		if (error)
			break;
	} while (nmaps > 0);

	kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));

	return (error);
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 * Given the inode, and a dquot (either USR or GRP, doesn't matter),
 * this updates its incore copy as well as the buffer copy. This is
 * so that once the quotacheck is done, we can just log all the buffers,
 * as opposed to logging numerous updates to individual dquots.
 */
STATIC void
xfs_qm_quotacheck_dqadjust(
	xfs_dquot_t		*dqp,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	INT_MOD(dqp->q_core.d_icount, ARCH_CONVERT, +1);
	dqp->q_res_icount++;
	if (nblks) {
		INT_MOD(dqp->q_core.d_bcount, ARCH_CONVERT, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		INT_MOD(dqp->q_core.d_rtbcount, ARCH_CONVERT, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 */
	if (! XFS_IS_SUSER_DQUOT(dqp)) {
		xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
		xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
}

STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	xfs_bmbt_rec_t	*base;			/* base of extent array */
	xfs_bmbt_rec_t	*ep;			/* pointer to an extent entry */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return (error);
	}
	rtblks = 0;
	nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
	base = &ifp->if_u1.if_extents[0];
	for (ep = base; ep < &base[nextents]; ep++)
		rtblks += xfs_bmbt_get_blockcount(ep);
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return (0);
}

/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	void		*private_data,	/* not used */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* not used */
	void		*dip,		/* on-disk inode pointer (not used) */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_dquot_t	*udqp, *gdqp;
	xfs_qcnt_t	nblks, rtblks;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
		*res = BULKSTAT_RV_NOTHING;
		return (error);
	}

	if (ip->i_d.di_mode == 0) {
		xfs_iput_new(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(ENOENT);
	}

	/*
	 * Obtain the locked dquots. In case of an error (e.g. allocation
	 * fails for ENOSPC), we return the negative of the error number
	 * to bulkstat, so that it can get propagated to quotacheck()
	 * and make us disable quotas for the file system.
	 */
	if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
		xfs_iput(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_GIVEUP;
		return (error);
	}
1807 
1808 	rtblks = 0;
1809 	if (! XFS_IS_REALTIME_INODE(ip)) {
1810 		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
1811 	} else {
1812 		/*
1813 		 * Walk thru the extent list and count the realtime blocks.
1814 		 */
1815 		if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
1816 			xfs_iput(ip, XFS_ILOCK_EXCL);
1817 			if (udqp)
1818 				xfs_qm_dqput(udqp);
1819 			if (gdqp)
1820 				xfs_qm_dqput(gdqp);
1821 			*res = BULKSTAT_RV_GIVEUP;
1822 			return (error);
1823 		}
1824 		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1825 	}
1826 	ASSERT(ip->i_delayed_blks == 0);
1827 
1828 	/*
1829 	 * We can't release the inode while holding its dquot locks.
1830 	 * The inode can go into inactive and might try to acquire the dquotlocks.
1831 	 * So, just unlock here and do a vn_rele at the end.
1832 	 */
1833 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1834 
1835 	/*
1836 	 * Add the (disk blocks and inode) resources occupied by this
1837 	 * inode to its dquots. We do this adjustment in the incore dquot,
1838 	 * and also copy the changes to its buffer.
1839 	 * We don't care about putting these changes in a transaction
1840 	 * envelope because if we crash in the middle of a 'quotacheck'
1841 	 * we have to start from the beginning anyway.
1842 	 * Once we're done, we'll log all the dquot bufs.
1843 	 *
1844 	 * The *QUOTA_ON checks below may look pretty racey, but quotachecks
1845 	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1846 	 */
1847 	if (XFS_IS_UQUOTA_ON(mp)) {
1848 		ASSERT(udqp);
1849 		xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
1850 		xfs_qm_dqput(udqp);
1851 	}
1852 	if (XFS_IS_GQUOTA_ON(mp)) {
1853 		ASSERT(gdqp);
1854 		xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
1855 		xfs_qm_dqput(gdqp);
1856 	}
1857 	/*
1858 	 * Now release the inode. This will send it to 'inactive', and
1859 	 * possibly even free blocks.
1860 	 */
1861 	VN_RELE(XFS_ITOV(ip));
1862 
1863 	/*
1864 	 * Go on to the next inode.
1865 	 */
1866 	*res = BULKSTAT_RV_DIDONE;
1867 	return (0);
1868 }
1869 
1870 /*
1871  * Walk through all the filesystem inodes and construct a consistent view
1872  * of the disk quota world. If the quotacheck fails, disable quotas.
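 *
 * In outline (summarizing the code below):
 *  1. reset every on-disk USR/GRP dquot counter to zero (xfs_qm_dqiterate);
 *  2. bulkstat every inode, adjusting the incore dquots (xfs_qm_dqusage_adjust);
 *  3. flush the adjusted dquots out to their buffers, then sync to disk;
 *  4. stamp the superblock quotachecked, or turn quotas off on error.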
1873  */
1874 int
1875 xfs_qm_quotacheck(
1876 	xfs_mount_t	*mp)
1877 {
1878 	int		done, count, error;
1879 	xfs_ino_t	lastino;
1880 	size_t		structsz;
1881 	xfs_inode_t	*uip, *gip;
1882 	uint		flags;
1883 
1884 	count = INT_MAX;
1885 	structsz = 1;
1886 	lastino = 0;
1887 	flags = 0;
1888 
1889 	ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
1890 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1891 
1892 	/*
1893 	 * There should be no cached dquots. The (simplistic) quotacheck
1894 	 * algorithm doesn't like that.
1895 	 */
1896 	ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);
1897 
1898 	cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
1899 
1900 	/*
1901 	 * First we go through all the dquots on disk, USR and GRP, and reset
1902 	 * their counters to zero. We need a clean slate.
1903 	 * We don't log our changes till later.
1904 	 */
1905 	if ((uip = XFS_QI_UQIP(mp))) {
1906 		if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
1907 			goto error_return;
1908 		flags |= XFS_UQUOTA_CHKD;
1909 	}
1910 
1911 	if ((gip = XFS_QI_GQIP(mp))) {
1912 		if ((error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA)))
1913 			goto error_return;
1914 		flags |= XFS_GQUOTA_CHKD;
1915 	}
1916 
1917 	do {
1918 		/*
1919 		 * Iterate through all the inodes in the file system,
1920 		 * adjusting the corresponding dquot counters in core.
1921 		 */
1922 		if ((error = xfs_bulkstat(mp, &lastino, &count,
1923 				     xfs_qm_dqusage_adjust, NULL,
1924 				     structsz, NULL,
1925 				     BULKSTAT_FG_IGET|BULKSTAT_FG_VFSLOCKED,
1926 				     &done)))
1927 			break;
1928 
1929 	} while (! done);
1930 
1931 	/*
1932 	 * We can get this error if we couldn't do a dquot allocation inside
1933 	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about any
1934 	 * dirty dquots that might be cached; we just want to get rid of them
1935 	 * and turn quotas off. The dquots won't be attached to any inodes at
1936 	 * this point, because we intentionally didn't attach them in dqget_noattach.
1937 	 */
1938 	if (error) {
1939 		xfs_qm_dqpurge_all(mp,
1940 				   XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA|
1941 				   XFS_QMOPT_QUOTAOFF);
1942 		goto error_return;
1943 	}
1944 	/*
1945 	 * We've made all the changes that we need to make incore.
1946 	 * Now flush them down to the disk buffers.
1947 	 */
1948 	xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
1949 
1950 	/*
1951 	 * We didn't log anything, because if we crashed, we'll have to
1952 	 * start the quotacheck from scratch anyway. However, we must make
1953 	 * sure that our dquot changes are secure before we put the
1954 	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
1955 	 * flush.
1956 	 */
1957 	XFS_bflush(mp->m_ddev_targp);
1958 
1959 	/*
1960 	 * If one type of quotas is off, then it will lose its
1961 	 * quotachecked status, since we won't be doing accounting for
1962 	 * that type anymore.
1963 	 */
1964 	mp->m_qflags &= ~(XFS_GQUOTA_CHKD | XFS_UQUOTA_CHKD);
1965 	mp->m_qflags |= flags;
1966 
1967 	XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
1968 
1969  error_return:
1970 	if (error) {
1971 		cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
1972 			"Disabling quotas.",
1973 			mp->m_fsname, error);
1974 		/*
1975 		 * We must turn off quotas.
1976 		 */
1977 		ASSERT(mp->m_quotainfo != NULL);
1978 		ASSERT(xfs_Gqm != NULL);
1979 		xfs_qm_destroy_quotainfo(mp);
1980 		xfs_mount_reset_sbqflags(mp);
1981 	} else {
1982 		cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
1983 	}
1984 	return (error);
1985 }
1986 
1987 /*
1988  * This is called after the superblock has been read in and we're ready to
1989  * iget the quota inodes.
1990  */
1991 STATIC int
1992 xfs_qm_init_quotainos(
1993 	xfs_mount_t	*mp)
1994 {
1995 	xfs_inode_t	*uip, *gip;
1996 	int		error;
1997 	__int64_t	sbflags;
1998 	uint		flags;
1999 
2000 	ASSERT(mp->m_quotainfo);
2001 	uip = gip = NULL;
2002 	sbflags = 0;
2003 	flags = 0;
2004 
2005 	/*
2006 	 * Get the uquota and gquota inodes
2007 	 */
2008 	if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
2009 		if (XFS_IS_UQUOTA_ON(mp) &&
2010 		    mp->m_sb.sb_uquotino != NULLFSINO) {
2011 			ASSERT(mp->m_sb.sb_uquotino > 0);
2012 			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
2013 					     0, 0, &uip, 0)))
2014 				return XFS_ERROR(error);
2015 		}
2016 		if (XFS_IS_GQUOTA_ON(mp) &&
2017 		    mp->m_sb.sb_gquotino != NULLFSINO) {
2018 			ASSERT(mp->m_sb.sb_gquotino > 0);
2019 			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
2020 					     0, 0, &gip, 0))) {
2021 				if (uip)
2022 					VN_RELE(XFS_ITOV(uip));
2023 				return XFS_ERROR(error);
2024 			}
2025 		}
2026 	} else {
2027 		flags |= XFS_QMOPT_SBVERSION;
2028 		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
2029 			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
2030 	}
2031 
2032 	/*
2033 	 * Create the two inodes, if they don't exist already. The changes
2034 	 * made above will get added to a transaction and logged in one of
2035 	 * the qino_alloc calls below.  If the device is readonly,
2036 	 * temporarily switch to read-write to do this.
2037 	 */
2038 	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
2039 		if ((error = xfs_qm_qino_alloc(mp, &uip,
2040 					      sbflags | XFS_SB_UQUOTINO,
2041 					      flags | XFS_QMOPT_UQUOTA)))
2042 			return XFS_ERROR(error);
2043 
2044 		flags &= ~XFS_QMOPT_SBVERSION;
2045 	}
2046 	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
2047 		if ((error = xfs_qm_qino_alloc(mp, &gip,
2048 					      sbflags | XFS_SB_GQUOTINO,
2049 					      flags | XFS_QMOPT_GQUOTA))) {
2050 			if (uip)
2051 				VN_RELE(XFS_ITOV(uip));
2052 
2053 			return XFS_ERROR(error);
2054 		}
2055 	}
2056 
2057 	XFS_QI_UQIP(mp) = uip;
2058 	XFS_QI_GQIP(mp) = gip;
2059 
2060 	return (0);
2061 }
2062 
2063 
2064 /*
2065  * Traverse the freelist of dquots and attempt to reclaim a maximum of
2066  * 'howmany' dquots. This operation races with dqlookup(), and attempts to
2067  * favor the lookup function ...
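 *
 * Returns the number of dquots actually reclaimed; this may fall short of
 * 'howmany' if we keep losing races with dqlookup
 * (XFS_QM_RECLAIM_MAX_RESTARTS caps the number of restarts).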
2068  * XXXsup merge this with qm_reclaim_one().
2069  */
2070 STATIC int
2071 xfs_qm_shake_freelist(
2072 	int howmany)
2073 {
2074 	int		nreclaimed;
2075 	xfs_dqhash_t	*hash;
2076 	xfs_dquot_t	*dqp, *nextdqp;
2077 	int		restarts;
2078 	int		nflushes;
2079 
2080 	if (howmany <= 0)
2081 		return (0);
2082 
2083 	nreclaimed = 0;
2084 	restarts = 0;
2085 	nflushes = 0;
2086 
2087 #ifdef QUOTADEBUG
2088 	cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
2089 #endif
2090 	/* lock order is: hashchainlock, freelistlock, mplistlock */
2091  tryagain:
2092 	xfs_qm_freelist_lock(xfs_Gqm);
2093 
2094 	for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
2095 	     ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
2096 	      nreclaimed < howmany); ) {
2097 		xfs_dqlock(dqp);
2098 
2099 		/*
2100 		 * We are racing with dqlookup here. Naturally we don't
2101 		 * want to reclaim a dquot that lookup wants.
2102 		 */
2103 		if (dqp->dq_flags & XFS_DQ_WANT) {
2104 			xfs_dqunlock(dqp);
2105 			xfs_qm_freelist_unlock(xfs_Gqm);
2106 			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
2107 				return (nreclaimed);
2108 			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
2109 			goto tryagain;
2110 		}
2111 
2112 		/*
2113 		 * If the dquot is inactive, we are assured that it is
2114 		 * not on the mplist or the hashlist, and that makes our
2115 		 * life easier.
2116 		 */
2117 		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
2118 			ASSERT(dqp->q_mount == NULL);
2119 			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
2120 			ASSERT(dqp->HL_PREVP == NULL);
2121 			ASSERT(dqp->MPL_PREVP == NULL);
2122 			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
2123 			nextdqp = dqp->dq_flnext;
2124 			goto off_freelist;
2125 		}
2126 
2127 		ASSERT(dqp->MPL_PREVP);
2128 		/*
2129 		 * Try to grab the flush lock. If this dquot is in the process of
2130 		 * getting flushed to disk, we don't want to reclaim it.
2131 		 */
2132 		if (! xfs_qm_dqflock_nowait(dqp)) {
2133 			xfs_dqunlock(dqp);
2134 			dqp = dqp->dq_flnext;
2135 			continue;
2136 		}
2137 
2138 		/*
2139 		 * We have the flush lock so we know that this is not in the
2140 		 * process of being flushed. So, if this is dirty, flush it
2141 		 * DELWRI so that we don't get a freelist infested with
2142 		 * dirty dquots.
2143 		 */
2144 		if (XFS_DQ_IS_DIRTY(dqp)) {
2145 			xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
2146 			/*
2147 			 * We flush it delayed write, so don't bother
2148 			 * releasing the mplock.
2149 			 */
2150 			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
2151 			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
2152 			dqp = dqp->dq_flnext;
2153 			continue;
2154 		}
2155 		/*
2156 		 * We're trying to get the hashlock out of order. This races
2157 		 * with dqlookup; so, we give up and move on to the next dquot if
2158 		 * we can't get the hashlock. This way, we won't starve a
2159 		 * dqlookup process that already holds the hashlock and is
2160 		 * waiting for the freelist lock.
2161 		 */
2162 		if (! xfs_qm_dqhashlock_nowait(dqp)) {
2163 			xfs_dqfunlock(dqp);
2164 			xfs_dqunlock(dqp);
2165 			dqp = dqp->dq_flnext;
2166 			continue;
2167 		}
2168 		/*
2169 		 * This races with dquot allocation code as well as dqflush_all
2170 		 * and the reclaim code. So, if we fail to grab the mplist lock,
2171 		 * give up everything and start over.
2172 		 */
2173 		hash = dqp->q_hash;
2174 		ASSERT(hash);
2175 		if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
2176 			/* XXX put a sentinel so that we can come back here */
2177 			xfs_dqfunlock(dqp);
2178 			xfs_dqunlock(dqp);
2179 			XFS_DQ_HASH_UNLOCK(hash);
2180 			xfs_qm_freelist_unlock(xfs_Gqm);
2181 			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
2182 				return (nreclaimed);
2183 			goto tryagain;
2184 		}
2185 		xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
2186 #ifdef QUOTADEBUG
2187 		cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
2188 			dqp, INT_GET(dqp->q_core.d_id, ARCH_CONVERT));
2189 #endif
2190 		ASSERT(dqp->q_nrefs == 0);
2191 		nextdqp = dqp->dq_flnext;
2192 		XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
2193 		XQM_HASHLIST_REMOVE(hash, dqp);
2194 		xfs_dqfunlock(dqp);
2195 		xfs_qm_mplist_unlock(dqp->q_mount);
2196 		XFS_DQ_HASH_UNLOCK(hash);
2197 
2198  off_freelist:
2199 		XQM_FREELIST_REMOVE(dqp);
2200 		xfs_dqunlock(dqp);
2201 		nreclaimed++;
2202 		XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
2203 		xfs_qm_dqdestroy(dqp);
2204 		dqp = nextdqp;
2205 	}
2206 	xfs_qm_freelist_unlock(xfs_Gqm);
2207 	return (nreclaimed);
2208 }
2209 
2210 
2211 /*
2212  * The kmem_shake interface is invoked when memory is running low.
2213  */
2214 /* ARGSUSED */
2215 STATIC int
2216 xfs_qm_shake(int nr_to_scan, unsigned int gfp_mask)
2217 {
2218 	int	ndqused, nfree, n;
2219 
2220 	if (!kmem_shake_allow(gfp_mask))
2221 		return (0);
2222 	if (!xfs_Gqm)
2223 		return (0);
2224 
2225 	nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
2226 	/* incore dquots in all f/s's */
2227 	ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
2228 
2229 	ASSERT(ndqused >= 0);
2230 
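	/*
	 * Don't bother shaking unless free dquots outnumber the ones in
	 * use, or the freelist has reached the ndquot high-water mark.
	 */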
2231 	if (nfree <= ndqused && nfree < ndquot)
2232 		return (0);
2233 
2234 	ndqused *= xfs_Gqm->qm_dqfree_ratio;	/* target # of free dquots */
2235 	n = nfree - ndqused - ndquot;		/* # over target */
2236 
2237 	return xfs_qm_shake_freelist(MAX(nfree, n));
2238 }
2239 
2240 
2241 /*
2242  * Just pop the least recently used dquot off the freelist and
2243  * recycle it. The returned dquot, if any, is off all lists and unlocked.
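 *
 * Returns NULL if nothing could be reclaimed, i.e. everything on the
 * freelist was wanted by dqlookup, dirty, or lost a trylock race.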
2244  */
2245 STATIC xfs_dquot_t *
2246 xfs_qm_dqreclaim_one(void)
2247 {
2248 	xfs_dquot_t	*dqpout;
2249 	xfs_dquot_t	*dqp;
2250 	int		restarts;
2251 	int		nflushes;
2252 
2253 	restarts = 0;
2254 	dqpout = NULL;
2255 	nflushes = 0;
2256 
2257 	/* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
2258  startagain:
2259 	xfs_qm_freelist_lock(xfs_Gqm);
2260 
2261 	FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
2262 		xfs_dqlock(dqp);
2263 
2264 		/*
2265 		 * We are racing with dqlookup here. Naturally we don't
2266 		 * want to reclaim a dquot that lookup wants. We release the
2267 		 * freelist lock and start over, so that lookup will grab
2268 		 * both the dquot and the freelist lock.
2269 		 */
2270 		if (dqp->dq_flags & XFS_DQ_WANT) {
2271 			ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
2272 			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
2273 			xfs_dqunlock(dqp);
2274 			xfs_qm_freelist_unlock(xfs_Gqm);
2275 			if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
2276 				return (NULL);
2277 			XQM_STATS_INC(xqmstats.xs_qm_dqwants);
2278 			goto startagain;
2279 		}
2280 
2281 		/*
2282 		 * If the dquot is inactive, we are assured that it is
2283 		 * not on the mplist or the hashlist, and that makes our
2284 		 * life easier.
2285 		 */
2286 		if (dqp->dq_flags & XFS_DQ_INACTIVE) {
2287 			ASSERT(dqp->q_mount == NULL);
2288 			ASSERT(! XFS_DQ_IS_DIRTY(dqp));
2289 			ASSERT(dqp->HL_PREVP == NULL);
2290 			ASSERT(dqp->MPL_PREVP == NULL);
2291 			XQM_FREELIST_REMOVE(dqp);
2292 			xfs_dqunlock(dqp);
2293 			dqpout = dqp;
2294 			XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
2295 			break;
2296 		}
2297 
2298 		ASSERT(dqp->q_hash);
2299 		ASSERT(dqp->MPL_PREVP);
2300 
2301 		/*
2302 		 * Try to grab the flush lock. If this dquot is in the process of
2303 		 * getting flushed to disk, we don't want to reclaim it.
2304 		 */
2305 		if (! xfs_qm_dqflock_nowait(dqp)) {
2306 			xfs_dqunlock(dqp);
2307 			continue;
2308 		}
2309 
2310 		/*
2311 		 * We have the flush lock so we know that this is not in the
2312 		 * process of being flushed. So, if this is dirty, flush it
2313 		 * DELWRI so that we don't get a freelist infested with
2314 		 * dirty dquots.
2315 		 */
2316 		if (XFS_DQ_IS_DIRTY(dqp)) {
2317 			xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
2318 			/*
2319 			 * We flush it delayed write, so don't bother
2320 			 * releasing the freelist lock.
2321 			 */
2322 			(void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
2323 			xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
2324 			continue;
2325 		}
2326 
2327 		if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
2328 			xfs_dqfunlock(dqp);
2329 			xfs_dqunlock(dqp);
2330 			continue;
2331 		}
2332 
2333 		if (! xfs_qm_dqhashlock_nowait(dqp))
2334 			goto mplistunlock;
2335 
2336 		ASSERT(dqp->q_nrefs == 0);
2337 		xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
2338 		XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
2339 		XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
2340 		XQM_FREELIST_REMOVE(dqp);
2341 		dqpout = dqp;
2342 		XFS_DQ_HASH_UNLOCK(dqp->q_hash);
2343  mplistunlock:
2344 		xfs_qm_mplist_unlock(dqp->q_mount);
2345 		xfs_dqfunlock(dqp);
2346 		xfs_dqunlock(dqp);
2347 		if (dqpout)
2348 			break;
2349 	}
2350 
2351 	xfs_qm_freelist_unlock(xfs_Gqm);
2352 	return (dqpout);
2353 }
2354 
2355 
2356 /*------------------------------------------------------------------*/
2357 
2358 /*
2359  * Return a new incore dquot. Depending on the number of
2360  * dquots in the system, we either allocate a new one on the kernel heap,
2361  * or reclaim a free one.
2362  * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
2363  * to reclaim an existing one from the freelist.
2364  */
2365 boolean_t
2366 xfs_qm_dqalloc_incore(
2367 	xfs_dquot_t **O_dqpp)
2368 {
2369 	xfs_dquot_t	*dqp;
2370 
2371 	/*
2372 	 * Check against high water mark to see if we want to pop
2373 	 * a nincompoop dquot off the freelist.
2374 	 */
2375 	if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
2376 		/*
2377 		 * Try to recycle a dquot from the freelist.
2378 		 */
2379 		if ((dqp = xfs_qm_dqreclaim_one())) {
2380 			XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
2381 			/*
2382 			 * Just zero the core here. The rest will get
2383 			 * reinitialized by caller. XXX we shouldn't even
2384 			 * do this zero ...
2385 			 */
2386 			memset(&dqp->q_core, 0, sizeof(dqp->q_core));
2387 			*O_dqpp = dqp;
2388 			return (B_FALSE);
2389 		}
2390 		XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
2391 	}
2392 
2393 	/*
2394 	 * Allocate a brand new dquot on the kernel heap and return it
2395 	 * to the caller to initialize.
2396 	 */
2397 	ASSERT(xfs_Gqm->qm_dqzone != NULL);
2398 	*O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
2399 	atomic_inc(&xfs_Gqm->qm_totaldquots);
2400 
2401 	return (B_TRUE);
2402 }
2403 
2404 
2405 /*
2406  * Start a transaction and write the incore superblock changes to
2407  * disk. The flags parameter indicates which fields have changed.
2408  */
2409 int
2410 xfs_qm_write_sb_changes(
2411 	xfs_mount_t	*mp,
2412 	__int64_t	flags)
2413 {
2414 	xfs_trans_t	*tp;
2415 	int		error;
2416 
2417 #ifdef QUOTADEBUG
2418 	cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
2419 #endif
2420 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
2421 	if ((error = xfs_trans_reserve(tp, 0,
2422 				      mp->m_sb.sb_sectsize + 128, 0,
2423 				      0,
2424 				      XFS_DEFAULT_LOG_COUNT))) {
2425 		xfs_trans_cancel(tp, 0);
2426 		return (error);
2427 	}
2428 
2429 	xfs_mod_sb(tp, flags);
2430 	(void) xfs_trans_commit(tp, 0, NULL);
2431 
2432 	return (0);
2433 }
2434 
2435 
2436 /* --------------- utility functions for vnodeops ---------------- */
2437 
2438 
2439 /*
2440  * Given an inode, and a uid and gid (from cred_t), make sure that we have
2441  * allocated relevant dquot(s) on disk, and that we won't exceed inode
2442  * quotas by creating this file.
2443  * This also attaches dquot(s) to the given inode after locking it,
2444  * and returns the dquots corresponding to the uid and/or gid.
2445  *
2446  * in	: inode (unlocked)
2447  * out	: udquot, gdquot with references taken and unlocked
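 *
 * Note that when a dquot has to be fetched via dqget, the inode is
 * unlocked and then re-locked SHARED, so the ilock is not held
 * continuously across this call.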
2448  */
2449 int
2450 xfs_qm_vop_dqalloc(
2451 	xfs_mount_t	*mp,
2452 	xfs_inode_t	*ip,
2453 	uid_t		uid,
2454 	gid_t		gid,
2455 	uint		flags,
2456 	xfs_dquot_t	**O_udqpp,
2457 	xfs_dquot_t	**O_gdqpp)
2458 {
2459 	int		error;
2460 	xfs_dquot_t	*uq, *gq;
2461 	uint		lockflags;
2462 
2463 	if (!XFS_IS_QUOTA_ON(mp))
2464 		return 0;
2465 
2466 	lockflags = XFS_ILOCK_EXCL;
2467 	xfs_ilock(ip, lockflags);
2468 
2469 	if ((flags & XFS_QMOPT_INHERIT) &&
2470 	    XFS_INHERIT_GID(ip, XFS_MTOVFS(mp)))
2471 		gid = ip->i_d.di_gid;
2472 
2473 	/*
2474 	 * Attach the dquot(s) to this inode, doing a dquot allocation
2475 	 * if necessary. The dquot(s) will not be locked.
2476 	 */
2477 	if (XFS_NOT_DQATTACHED(mp, ip)) {
2478 		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
2479 					    XFS_QMOPT_ILOCKED))) {
2480 			xfs_iunlock(ip, lockflags);
2481 			return (error);
2482 		}
2483 	}
2484 
2485 	uq = gq = NULL;
2486 	if ((flags & XFS_QMOPT_UQUOTA) &&
2487 	    XFS_IS_UQUOTA_ON(mp)) {
2488 		if (ip->i_d.di_uid != uid) {
2489 			/*
2490 			 * What we need is the dquot that has this uid, and
2491 			 * if we send the inode to dqget, the uid of the inode
2492 			 * takes priority over what's sent in the uid argument.
2493 			 * Since we're not sending the inode, we must unlock
2494 			 * it here before calling dqget; otherwise we'd
2495 			 * deadlock by doing trans_reserve while holding
2496 			 * the ilock.
2497 			 */
2498 			xfs_iunlock(ip, lockflags);
2499 			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
2500 						 XFS_DQ_USER,
2501 						 XFS_QMOPT_DQALLOC |
2502 						 XFS_QMOPT_DOWARN,
2503 						 &uq))) {
2504 				ASSERT(error != ENOENT);
2505 				return (error);
2506 			}
2507 			/*
2508 			 * Get the ilock in the right order.
2509 			 */
2510 			xfs_dqunlock(uq);
2511 			lockflags = XFS_ILOCK_SHARED;
2512 			xfs_ilock(ip, lockflags);
2513 		} else {
2514 			/*
2515 			 * Take an extra reference, because we'll return
2516 			 * this to the caller.
2517 			 */
2518 			ASSERT(ip->i_udquot);
2519 			uq = ip->i_udquot;
2520 			xfs_dqlock(uq);
2521 			XFS_DQHOLD(uq);
2522 			xfs_dqunlock(uq);
2523 		}
2524 	}
2525 	if ((flags & XFS_QMOPT_GQUOTA) &&
2526 	    XFS_IS_GQUOTA_ON(mp)) {
2527 		if (ip->i_d.di_gid != gid) {
2528 			xfs_iunlock(ip, lockflags);
2529 			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
2530 						 XFS_DQ_GROUP,
2531 						 XFS_QMOPT_DQALLOC |
2532 						 XFS_QMOPT_DOWARN,
2533 						 &gq))) {
2534 				if (uq)
2535 					xfs_qm_dqrele(uq);
2536 				ASSERT(error != ENOENT);
2537 				return (error);
2538 			}
2539 			xfs_dqunlock(gq);
2540 			lockflags = XFS_ILOCK_SHARED;
2541 			xfs_ilock(ip, lockflags);
2542 		} else {
2543 			ASSERT(ip->i_gdquot);
2544 			gq = ip->i_gdquot;
2545 			xfs_dqlock(gq);
2546 			XFS_DQHOLD(gq);
2547 			xfs_dqunlock(gq);
2548 		}
2549 	}
2550 	if (uq)
2551 		xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);
2552 
2553 	xfs_iunlock(ip, lockflags);
2554 	if (O_udqpp)
2555 		*O_udqpp = uq;
2556 	else if (uq)
2557 		xfs_qm_dqrele(uq);
2558 	if (O_gdqpp)
2559 		*O_gdqpp = gq;
2560 	else if (gq)
2561 		xfs_qm_dqrele(gq);
2562 	return (0);
2563 }
2564 
2565 /*
2566  * Actually transfer ownership, and do dquot modifications.
2567  * These were already reserved.
2568  */
2569 xfs_dquot_t *
2570 xfs_qm_vop_chown(
2571 	xfs_trans_t	*tp,
2572 	xfs_inode_t	*ip,
2573 	xfs_dquot_t	**IO_olddq,
2574 	xfs_dquot_t	*newdq)
2575 {
2576 	xfs_dquot_t	*prevdq;
2577 	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
2578 	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
2579 
2580 	/* old dquot */
2581 	prevdq = *IO_olddq;
2582 	ASSERT(prevdq);
2583 	ASSERT(prevdq != newdq);
2584 
2585 	xfs_trans_mod_dquot(tp, prevdq,
2586 			    XFS_TRANS_DQ_BCOUNT,
2587 			    -(ip->i_d.di_nblocks));
2588 	xfs_trans_mod_dquot(tp, prevdq,
2589 			    XFS_TRANS_DQ_ICOUNT,
2590 			    -1);
2591 
2592 	/* the sparkling new dquot */
2593 	xfs_trans_mod_dquot(tp, newdq,
2594 			    XFS_TRANS_DQ_BCOUNT,
2595 			    ip->i_d.di_nblocks);
2596 	xfs_trans_mod_dquot(tp, newdq,
2597 			    XFS_TRANS_DQ_ICOUNT,
2598 			    1);
2599 
2600 	/*
2601 	 * Take an extra reference, because the inode
2602 	 * is going to keep this dquot pointer even
2603 	 * after the trans_commit.
2604 	 */
2605 	xfs_dqlock(newdq);
2606 	XFS_DQHOLD(newdq);
2607 	xfs_dqunlock(newdq);
2608 	*IO_olddq = newdq;
2609 
2610 	return (prevdq);
2611 }
2612 
2613 /*
2614  * Quota reservations for setattr(AT_UID|AT_GID).
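 *
 * This reserves disk blocks and an inode against the dquots the inode is
 * changing over to; delayed allocation blocks are moved from the old
 * dquots to the new ones by hand, since they live outside a transaction.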
2615  */
2616 int
2617 xfs_qm_vop_chown_reserve(
2618 	xfs_trans_t	*tp,
2619 	xfs_inode_t	*ip,
2620 	xfs_dquot_t	*udqp,
2621 	xfs_dquot_t	*gdqp,
2622 	uint		flags)
2623 {
2624 	int		error;
2625 	xfs_mount_t	*mp;
2626 	uint		delblks;
2627 	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;
2628 
2629 	ASSERT(XFS_ISLOCKED_INODE(ip));
2630 	mp = ip->i_mount;
2631 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2632 
2633 	delblks = ip->i_delayed_blks;
2634 	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
2635 
2636 	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
2637 	    ip->i_d.di_uid != (uid_t)INT_GET(udqp->q_core.d_id, ARCH_CONVERT)) {
2638 		delblksudq = udqp;
2639 		/*
2640 		 * If there are delayed allocation blocks, then we have to
2641 		 * unreserve those from the old dquot, and add them to the
2642 		 * new dquot.
2643 		 */
2644 		if (delblks) {
2645 			ASSERT(ip->i_udquot);
2646 			unresudq = ip->i_udquot;
2647 		}
2648 	}
2649 	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
2650 	    ip->i_d.di_gid != INT_GET(gdqp->q_core.d_id, ARCH_CONVERT)) {
2651 		delblksgdq = gdqp;
2652 		if (delblks) {
2653 			ASSERT(ip->i_gdquot);
2654 			unresgdq = ip->i_gdquot;
2655 		}
2656 	}
2657 
2658 	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
2659 				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
2660 				flags | XFS_QMOPT_RES_REGBLKS)))
2661 		return (error);
2662 
2663 	/*
2664 	 * Do the delayed blks reservations/unreservations now. Since these
2665 	 * are done without the help of a transaction, if a reservation fails
2666 	 * its previous reservations won't be automatically undone by the
2667 	 * transaction code. So, we have to undo them manually here.
2668 	 */
2669 	if (delblks) {
2670 		/*
2671 		 * Do the reservations first. Unreservation can't fail.
2672 		 */
2673 		ASSERT(delblksudq || delblksgdq);
2674 		ASSERT(unresudq || unresgdq);
2675 		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2676 				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
2677 				flags | XFS_QMOPT_RES_REGBLKS)))
2678 			return (error);
2679 		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2680 				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
2681 				XFS_QMOPT_RES_REGBLKS);
2682 	}
2683 
2684 	return (0);
2685 }
2686 
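/*
 * Attach dquots to every inode involved in a rename (up to four entries
 * in i_tab, possibly containing duplicates), so that later quota
 * modifications find the dquots already in place.
 */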
2687 int
2688 xfs_qm_vop_rename_dqattach(
2689 	xfs_inode_t	**i_tab)
2690 {
2691 	xfs_inode_t	*ip;
2692 	int		i;
2693 	int		error;
2694 
2695 	ip = i_tab[0];
2696 
2697 	if (! XFS_IS_QUOTA_ON(ip->i_mount))
2698 		return (0);
2699 
2700 	if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
2701 		error = xfs_qm_dqattach(ip, 0);
2702 		if (error)
2703 			return (error);
2704 	}
2705 	for (i = 1; (i < 4 && i_tab[i]); i++) {
2706 		/*
2707 		 * Watch out for duplicate entries in the table.
2708 		 */
2709 		if ((ip = i_tab[i]) != i_tab[i-1]) {
2710 			if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
2711 				error = xfs_qm_dqattach(ip, 0);
2712 				if (error)
2713 					return (error);
2714 			}
2715 		}
2716 	}
2717 	return (0);
2718 }
2719 
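/*
 * Hook a newly created inode up to the given dquots: take a reference on
 * each dquot, attach it to the inode, and charge the new inode to the
 * dquot's inode count within this transaction.
 */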
2720 void
2721 xfs_qm_vop_dqattach_and_dqmod_newinode(
2722 	xfs_trans_t	*tp,
2723 	xfs_inode_t	*ip,
2724 	xfs_dquot_t	*udqp,
2725 	xfs_dquot_t	*gdqp)
2726 {
2727 	if (!XFS_IS_QUOTA_ON(tp->t_mountp))
2728 		return;
2729 
2730 	ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
2731 	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
2732 
2733 	if (udqp) {
2734 		xfs_dqlock(udqp);
2735 		XFS_DQHOLD(udqp);
2736 		xfs_dqunlock(udqp);
2737 		ASSERT(ip->i_udquot == NULL);
2738 		ip->i_udquot = udqp;
2739 		ASSERT(ip->i_d.di_uid == INT_GET(udqp->q_core.d_id, ARCH_CONVERT));
2740 		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2741 	}
2742 	if (gdqp) {
2743 		xfs_dqlock(gdqp);
2744 		XFS_DQHOLD(gdqp);
2745 		xfs_dqunlock(gdqp);
2746 		ASSERT(ip->i_gdquot == NULL);
2747 		ip->i_gdquot = gdqp;
2748 		ASSERT(ip->i_d.di_gid == INT_GET(gdqp->q_core.d_id, ARCH_CONVERT));
2749 		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2750 	}
2751 }
2752 
2753 /* ------------- list stuff -----------------*/
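/*
 * Initialize a freelist as an empty, circular, doubly linked list whose
 * head points back at itself.
 */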
2754 void
2755 xfs_qm_freelist_init(xfs_frlist_t *ql)
2756 {
2757 	ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
2758 	mutex_init(&ql->qh_lock, MUTEX_DEFAULT, "dqf");
2759 	ql->qh_version = 0;
2760 	ql->qh_nelems = 0;
2761 }
2762 
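/*
 * Tear down a freelist: walk it, destroying every dquot still on it,
 * then destroy the list mutex. The list must be empty when we're done.
 */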
2763 void
2764 xfs_qm_freelist_destroy(xfs_frlist_t *ql)
2765 {
2766 	xfs_dquot_t	*dqp, *nextdqp;
2767 
2768 	mutex_lock(&ql->qh_lock, PINOD);
2769 	for (dqp = ql->qh_next;
2770 	     dqp != (xfs_dquot_t *)ql; ) {
2771 		xfs_dqlock(dqp);
2772 		nextdqp = dqp->dq_flnext;
2773 #ifdef QUOTADEBUG
2774 		cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
2775 #endif
2776 		XQM_FREELIST_REMOVE(dqp);
2777 		xfs_dqunlock(dqp);
2778 		xfs_qm_dqdestroy(dqp);
2779 		dqp = nextdqp;
2780 	}
2781 	/*
2782 	 * Don't bother unlocking; the mutex is destroyed right below.
2783 	 */
2784 	mutex_destroy(&ql->qh_lock);
2785 
2786 	ASSERT(ql->qh_nelems == 0);
2787 }
2788 
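/*
 * Insert a dquot at the head of the freelist, fixing up the circular
 * links and bumping the freelist element count and version stamp.
 */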
2789 void
2790 xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
2791 {
2792 	dq->dq_flnext = ql->qh_next;
2793 	dq->dq_flprev = (xfs_dquot_t *)ql;
2794 	ql->qh_next = dq;
2795 	dq->dq_flnext->dq_flprev = dq;
2796 	xfs_Gqm->qm_dqfreelist.qh_nelems++;
2797 	xfs_Gqm->qm_dqfreelist.qh_version++;
2798 }
2799 
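/*
 * Unlink a dquot from the freelist, leaving its links pointing at
 * itself, and drop the freelist element count.
 */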
2800 void
2801 xfs_qm_freelist_unlink(xfs_dquot_t *dq)
2802 {
2803 	xfs_dquot_t *next = dq->dq_flnext;
2804 	xfs_dquot_t *prev = dq->dq_flprev;
2805 
2806 	next->dq_flprev = prev;
2807 	prev->dq_flnext = next;
2808 	dq->dq_flnext = dq->dq_flprev = dq;
2809 	xfs_Gqm->qm_dqfreelist.qh_nelems--;
2810 	xfs_Gqm->qm_dqfreelist.qh_version++;
2811 }
2812 
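/*
 * Append a dquot at the tail of the freelist by inserting it after the
 * current last element (qh_prev).
 */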
2813 void
2814 xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
2815 {
2816 	xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
2817 }
2818 
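/*
 * Try to take the dquot's hash-chain lock without sleeping; returns
 * nonzero if the lock was acquired.
 */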
2819 int
2820 xfs_qm_dqhashlock_nowait(
2821 	xfs_dquot_t *dqp)
2822 {
2823 	int locked;
2824 
2825 	locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
2826 	return (locked);
2827 }
2828 
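/*
 * Try to take the global freelist lock without sleeping; returns
 * nonzero on success.
 */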
2829 int
2830 xfs_qm_freelist_lock_nowait(
2831 	xfs_qm_t *xqm)
2832 {
2833 	int locked;
2834 
2835 	locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
2836 	return (locked);
2837 }
2838 
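/*
 * Try to take the per-mount dquot list (mplist) lock without sleeping;
 * returns nonzero on success.
 */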
2839 int
2840 xfs_qm_mplist_nowait(
2841 	xfs_mount_t	*mp)
2842 {
2843 	int locked;
2844 
2845 	ASSERT(mp->m_quotainfo);
2846 	locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
2847 	return (locked);
2848 }
2849