1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 
19 #include <linux/capability.h>
20 
21 #include "xfs.h"
22 #include "xfs_fs.h"
23 #include "xfs_bit.h"
24 #include "xfs_log.h"
25 #include "xfs_inum.h"
26 #include "xfs_trans.h"
27 #include "xfs_sb.h"
28 #include "xfs_ag.h"
29 #include "xfs_alloc.h"
30 #include "xfs_quota.h"
31 #include "xfs_mount.h"
32 #include "xfs_bmap_btree.h"
33 #include "xfs_inode.h"
34 #include "xfs_itable.h"
35 #include "xfs_bmap.h"
36 #include "xfs_rtalloc.h"
37 #include "xfs_error.h"
38 #include "xfs_attr.h"
39 #include "xfs_buf_item.h"
40 #include "xfs_utils.h"
41 #include "xfs_qm.h"
42 #include "xfs_trace.h"
43 
44 STATIC int	xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
45 STATIC int	xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
46 					uint);
47 STATIC uint	xfs_qm_export_flags(uint);
48 STATIC uint	xfs_qm_export_qtype_flags(uint);
49 STATIC void	xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
50 					fs_disk_quota_t *);
51 
52 
53 /*
54  * Turn off quota accounting and/or enforcement for all udquots and/or
55  * gdquots. Called only at unmount time.
56  *
57  * This assumes that there are no dquots of this file system cached
58  * incore, and modifies the ondisk dquot directly. Therefore, for example,
59  * it is an error to call this twice, without purging the cache.
60  */
61 int
62 xfs_qm_scall_quotaoff(
63 	xfs_mount_t		*mp,
64 	uint			flags)
65 {
66 	struct xfs_quotainfo	*q = mp->m_quotainfo;
67 	uint			dqtype;
68 	int			error;
69 	uint			inactivate_flags;
70 	xfs_qoff_logitem_t	*qoffstart;
71 	int			nculprits;
72 
73 	/*
74 	 * No file system can have quotas enabled on disk but not in core.
75 	 * Note that quota utilities (like quotaoff) _expect_
76 	 * errno == EEXIST here.
77 	 */
78 	if ((mp->m_qflags & flags) == 0)
79 		return XFS_ERROR(EEXIST);
80 	error = 0;
81 
82 	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
83 
84 	/*
85 	 * We don't want to deal with two quotaoffs messing up each other,
86 	 * so we're going to serialize it. quotaoff isn't exactly a performance
87 	 * critical thing.
88 	 * If quotaoff, then we must be dealing with the root filesystem.
89 	 */
90 	ASSERT(q);
91 	mutex_lock(&q->qi_quotaofflock);
92 
93 	/*
94 	 * If we're just turning off quota enforcement, change mp and go.
95 	 */
96 	if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
97 		mp->m_qflags &= ~(flags);
98 
99 		spin_lock(&mp->m_sb_lock);
100 		mp->m_sb.sb_qflags = mp->m_qflags;
101 		spin_unlock(&mp->m_sb_lock);
102 		mutex_unlock(&q->qi_quotaofflock);
103 
104 		/* XXX what to do if error ? Revert back to old vals incore ? */
105 		error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
106 		return (error);
107 	}
108 
109 	dqtype = 0;
110 	inactivate_flags = 0;
111 	/*
112 	 * If we're turning off accounting, we must also turn off enforcement
113 	 * and clear the quota 'CHKD' certificate so that a quotacheck is
114 	 * forced the next time this quota type is turned on.
115 	 */
116 	if (flags & XFS_UQUOTA_ACCT) {
117 		dqtype |= XFS_QMOPT_UQUOTA;
118 		flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
119 		inactivate_flags |= XFS_UQUOTA_ACTIVE;
120 	}
121 	if (flags & XFS_GQUOTA_ACCT) {
122 		dqtype |= XFS_QMOPT_GQUOTA;
123 		flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
124 		inactivate_flags |= XFS_GQUOTA_ACTIVE;
125 	} else if (flags & XFS_PQUOTA_ACCT) {
126 		dqtype |= XFS_QMOPT_PQUOTA;
127 		flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
128 		inactivate_flags |= XFS_PQUOTA_ACTIVE;
129 	}
130 
131 	/*
132 	 * Nothing to do?  Don't complain. This happens when we're just
133 	 * turning off quota enforcement.
134 	 */
135 	if ((mp->m_qflags & flags) == 0)
136 		goto out_unlock;
137 
138 	/*
139 	 * Write the LI_QUOTAOFF log record, and do SB changes atomically,
140 	 * and synchronously. If we fail to write, we should abort the
141 	 * operation as it cannot be recovered safely if we crash.
142 	 */
143 	error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
144 	if (error)
145 		goto out_unlock;
146 
147 	/*
148 	 * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
149 	 * to take care of the race between dqget and quotaoff. We don't take
150 	 * any special locks to reset these bits. All processes need to check
151 	 * these bits *after* taking inode lock(s) to see if the particular
152 	 * quota type is in the process of being turned off. If *ACTIVE, it is
153 	 * guaranteed that all dquot structures and all quotainode ptrs will all
154 	 * stay valid as long as that inode is kept locked.
155 	 *
156 	 * There is no turning back after this.
157 	 */
158 	mp->m_qflags &= ~inactivate_flags;
159 
160 	/*
161 	 * Give back all the dquot reference(s) held by inodes.
162 	 * Here we go thru every single incore inode in this file system, and
163 	 * do a dqrele on the i_udquot/i_gdquot that it may have.
164 	 * Essentially, as long as somebody has an inode locked, this guarantees
165 	 * that quotas will not be turned off. This is handy because in a
166 	 * transaction once we lock the inode(s) and check for quotaon, we can
167 	 * depend on the quota inodes (and other things) being valid as long as
168 	 * we keep the lock(s).
169 	 */
170 	xfs_qm_dqrele_all_inodes(mp, flags);
171 
172 	/*
173 	 * Next we make the changes in the quota flag in the mount struct.
174 	 * This isn't protected by a particular lock directly, because we
175 	 * don't want to take a mrlock every time we depend on quotas being on.
176 	 */
177 	mp->m_qflags &= ~(flags);
178 
179 	/*
180 	 * Go through all the dquots of this file system and purge them,
181 	 * according to what was turned off. We may not be able to get rid
182 	 * of all dquots, because dquots can have temporary references that
183 	 * are not attached to inodes. eg. xfs_setattr, xfs_create.
184 	 * So, if we couldn't purge all the dquots from the filesystem,
185 	 * we can't get rid of the incore data structures.
186 	 */
187 	while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype)))
188 		delay(10 * nculprits);
189 
190 	/*
191 	 * Transactions that had started before ACTIVE state bit was cleared
192 	 * could have logged many dquots, so they'd have higher LSNs than
193 	 * the first QUOTAOFF log record does. If we happen to crash when
194 	 * the tail of the log has gone past the QUOTAOFF record, but
195 	 * before the last dquot modification, those dquots __will__
196 	 * recover, and that's not good.
197 	 *
198 	 * So, we have QUOTAOFF start and end logitems; the start
199 	 * logitem won't get overwritten until the end logitem appears...
200 	 */
201 	error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
202 	if (error) {
203 		/* We're screwed now. Shutdown is the only option. */
204 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
205 		goto out_unlock;
206 	}
207 
208 	/*
209 	 * If quotas are completely disabled, close shop.
210 	 */
211 	if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
212 	    ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
213 		mutex_unlock(&q->qi_quotaofflock);
214 		xfs_qm_destroy_quotainfo(mp);
215 		return (0);
216 	}
217 
218 	/*
219 	 * Release our quotainode references if we don't need them anymore.
220 	 */
221 	if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
222 		IRELE(q->qi_uquotaip);
223 		q->qi_uquotaip = NULL;
224 	}
225 	if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
226 		IRELE(q->qi_gquotaip);
227 		q->qi_gquotaip = NULL;
228 	}
229 
230 out_unlock:
231 	mutex_unlock(&q->qi_quotaofflock);
232 	return error;
233 }
234 
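/*
 * Truncate a quota inode back to zero length and release all the blocks
 * it holds.  The inode itself is kept; only its data fork is emptied.
 */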
235 STATIC int
236 xfs_qm_scall_trunc_qfile(
237 	struct xfs_mount	*mp,
238 	xfs_ino_t		ino)
239 {
240 	struct xfs_inode	*ip;
241 	struct xfs_trans	*tp;
242 	int			error;
243 
244 	if (ino == NULLFSINO)
245 		return 0;
246 
247 	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
248 	if (error)
249 		return error;
250 
251 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
252 
253 	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
254 	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
255 				  XFS_TRANS_PERM_LOG_RES,
256 				  XFS_ITRUNCATE_LOG_COUNT);
257 	if (error) {
258 		xfs_trans_cancel(tp, 0);
259 		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
260 		goto out_put;
261 	}
262 
263 	xfs_ilock(ip, XFS_ILOCK_EXCL);
264 	xfs_trans_ijoin(tp, ip);
265 
266 	error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
267 	if (error) {
268 		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
269 				     XFS_TRANS_ABORT);
270 		goto out_unlock;
271 	}
272 
273 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
274 	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
275 
276 out_unlock:
277 	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
278 out_put:
279 	IRELE(ip);
280 	return error;
281 }
282 
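/*
 * Truncate the ondisk user and/or group/project quota inodes selected by
 * @flags.  An error from the user quota file takes precedence over an
 * error from the group/project quota file.
 */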
283 int
284 xfs_qm_scall_trunc_qfiles(
285 	xfs_mount_t	*mp,
286 	uint		flags)
287 {
288 	int		error = 0, error2 = 0;
289 
290 	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
291 		xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
292 			__func__, flags, mp->m_qflags);
293 		return XFS_ERROR(EINVAL);
294 	}
295 
296 	if (flags & XFS_DQ_USER)
297 		error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
298 	if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
299 		error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);
300 
301 	return error ? error : error2;
302 }
303 
304 /*
305  * Switch on (a given) quota enforcement for a filesystem.  This takes
306  * effect immediately.
307  * (Switching on quota accounting must be done at mount time.)
308  */
309 int
310 xfs_qm_scall_quotaon(
311 	xfs_mount_t	*mp,
312 	uint		flags)
313 {
314 	int		error;
315 	uint		qf;
316 	__int64_t	sbflags;
317 
318 	flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
319 	/*
320 	 * Switching on quota accounting must be done at mount time.
321 	 */
322 	flags &= ~(XFS_ALL_QUOTA_ACCT);
323 
324 	sbflags = 0;
325 
326 	if (flags == 0) {
327 		xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
328 			__func__, mp->m_qflags);
329 		return XFS_ERROR(EINVAL);
330 	}
331 
332 	/* No fs can turn on quotas with a delayed effect */
333 	ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);
334 
335 	/*
336 	 * Can't enforce without accounting. We check the superblock
337 	 * qflags here instead of m_qflags because rootfs can have
338 	 * quota acct on ondisk without m_qflags' knowing.
339 	 */
340 	if (((flags & XFS_UQUOTA_ACCT) == 0 &&
341 	    (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
342 	    (flags & XFS_UQUOTA_ENFD))
343 	    ||
344 	    ((flags & XFS_PQUOTA_ACCT) == 0 &&
345 	    (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
346 	    (flags & XFS_GQUOTA_ACCT) == 0 &&
347 	    (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
348 	    (flags & XFS_OQUOTA_ENFD))) {
349 		xfs_debug(mp,
350 			"%s: Can't enforce without acct, flags=%x sbflags=%x\n",
351 			__func__, flags, mp->m_sb.sb_qflags);
352 		return XFS_ERROR(EINVAL);
353 	}
354 	/*
355 	 * If everything's up to date incore, then don't waste time.
356 	 */
357 	if ((mp->m_qflags & flags) == flags)
358 		return XFS_ERROR(EEXIST);
359 
360 	/*
361 	 * Change sb_qflags on disk but not incore mp->m_qflags
362 	 * if this is the root filesystem.
363 	 */
364 	spin_lock(&mp->m_sb_lock);
365 	qf = mp->m_sb.sb_qflags;
366 	mp->m_sb.sb_qflags = qf | flags;
367 	spin_unlock(&mp->m_sb_lock);
368 
369 	/*
370 	 * There's nothing to change if it's the same.
371 	 */
372 	if ((qf & flags) == flags && sbflags == 0)
373 		return XFS_ERROR(EEXIST);
374 	sbflags |= XFS_SB_QFLAGS;
375 
376 	if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
377 		return (error);
378 	/*
379 	 * If we aren't trying to switch on quota enforcement, we are done.
380 	 */
381 	if  (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
382 	     (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
383 	     ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
384 	     (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
385 	     ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
386 	     (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
387 	    (flags & XFS_ALL_QUOTA_ENFD) == 0)
388 		return (0);
389 
390 	if (! XFS_IS_QUOTA_RUNNING(mp))
391 		return XFS_ERROR(ESRCH);
392 
393 	/*
394 	 * Switch on quota enforcement in core.
395 	 */
396 	mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
397 	mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
398 	mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);
399 
400 	return (0);
401 }
402 
403 
404 /*
405  * Return quota status information, such as uquota-off, enforcements, etc.
406  */
407 int
408 xfs_qm_scall_getqstat(
409 	struct xfs_mount	*mp,
410 	struct fs_quota_stat	*out)
411 {
412 	struct xfs_quotainfo	*q = mp->m_quotainfo;
413 	struct xfs_inode	*uip, *gip;
414 	boolean_t		tempuqip, tempgqip;
415 
416 	uip = gip = NULL;
417 	tempuqip = tempgqip = B_FALSE;
418 	memset(out, 0, sizeof(fs_quota_stat_t));
419 
420 	out->qs_version = FS_QSTAT_VERSION;
421 	if (!xfs_sb_version_hasquota(&mp->m_sb)) {
422 		out->qs_uquota.qfs_ino = NULLFSINO;
423 		out->qs_gquota.qfs_ino = NULLFSINO;
424 		return (0);
425 	}
426 	out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
427 							(XFS_ALL_QUOTA_ACCT|
428 							 XFS_ALL_QUOTA_ENFD));
429 	out->qs_pad = 0;
430 	out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
431 	out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
432 
433 	if (q) {
434 		uip = q->qi_uquotaip;
435 		gip = q->qi_gquotaip;
436 	}
437 	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
438 		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
439 					0, 0, &uip) == 0)
440 			tempuqip = B_TRUE;
441 	}
442 	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
443 		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
444 					0, 0, &gip) == 0)
445 			tempgqip = B_TRUE;
446 	}
447 	if (uip) {
448 		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
449 		out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
450 		if (tempuqip)
451 			IRELE(uip);
452 	}
453 	if (gip) {
454 		out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
455 		out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
456 		if (tempgqip)
457 			IRELE(gip);
458 	}
459 	if (q) {
460 		out->qs_incoredqs = q->qi_dquots;
461 		out->qs_btimelimit = q->qi_btimelimit;
462 		out->qs_itimelimit = q->qi_itimelimit;
463 		out->qs_rtbtimelimit = q->qi_rtbtimelimit;
464 		out->qs_bwarnlimit = q->qi_bwarnlimit;
465 		out->qs_iwarnlimit = q->qi_iwarnlimit;
466 	}
467 	return 0;
468 }
469 
470 #define XFS_DQ_MASK \
471 	(FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
472 
473 /*
474  * Adjust quota limits, and start/stop timers accordingly.
475  */
476 int
477 xfs_qm_scall_setqlim(
478 	xfs_mount_t		*mp,
479 	xfs_dqid_t		id,
480 	uint			type,
481 	fs_disk_quota_t		*newlim)
482 {
483 	struct xfs_quotainfo	*q = mp->m_quotainfo;
484 	xfs_disk_dquot_t	*ddq;
485 	xfs_dquot_t		*dqp;
486 	xfs_trans_t		*tp;
487 	int			error;
488 	xfs_qcnt_t		hard, soft;
489 
490 	if (newlim->d_fieldmask & ~XFS_DQ_MASK)
491 		return EINVAL;
492 	if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
493 		return 0;
494 
495 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
496 	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
497 				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
498 		xfs_trans_cancel(tp, 0);
499 		return (error);
500 	}
501 
502 	/*
503 	 * We don't want to race with a quotaoff so take the quotaoff lock.
504 	 * (We don't hold an inode lock, so there's nothing else to stop
505 	 * a quotaoff from happening). (XXX This doesn't currently happen
506 	 * because we take the vfslock before calling xfs_qm_sysent).
507 	 */
508 	mutex_lock(&q->qi_quotaofflock);
509 
510 	/*
511 	 * Get the dquot (locked), and join it to the transaction.
512 	 * Allocate the dquot if this doesn't exist.
513 	 */
514 	if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
515 		xfs_trans_cancel(tp, XFS_TRANS_ABORT);
516 		ASSERT(error != ENOENT);
517 		goto out_unlock;
518 	}
519 	xfs_trans_dqjoin(tp, dqp);
520 	ddq = &dqp->q_core;
521 
522 	/*
523 	 * Make sure that hardlimits are >= soft limits before changing.
524 	 */
525 	hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
526 		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
527 			be64_to_cpu(ddq->d_blk_hardlimit);
528 	soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
529 		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
530 			be64_to_cpu(ddq->d_blk_softlimit);
531 	if (hard == 0 || hard >= soft) {
532 		ddq->d_blk_hardlimit = cpu_to_be64(hard);
533 		ddq->d_blk_softlimit = cpu_to_be64(soft);
534 		if (id == 0) {
535 			q->qi_bhardlimit = hard;
536 			q->qi_bsoftlimit = soft;
537 		}
538 	} else {
539 		xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
540 	}
541 	hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
542 		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
543 			be64_to_cpu(ddq->d_rtb_hardlimit);
544 	soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
545 		(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
546 			be64_to_cpu(ddq->d_rtb_softlimit);
547 	if (hard == 0 || hard >= soft) {
548 		ddq->d_rtb_hardlimit = cpu_to_be64(hard);
549 		ddq->d_rtb_softlimit = cpu_to_be64(soft);
550 		if (id == 0) {
551 			q->qi_rtbhardlimit = hard;
552 			q->qi_rtbsoftlimit = soft;
553 		}
554 	} else {
555 		xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
556 	}
557 
558 	hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
559 		(xfs_qcnt_t) newlim->d_ino_hardlimit :
560 			be64_to_cpu(ddq->d_ino_hardlimit);
561 	soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
562 		(xfs_qcnt_t) newlim->d_ino_softlimit :
563 			be64_to_cpu(ddq->d_ino_softlimit);
564 	if (hard == 0 || hard >= soft) {
565 		ddq->d_ino_hardlimit = cpu_to_be64(hard);
566 		ddq->d_ino_softlimit = cpu_to_be64(soft);
567 		if (id == 0) {
568 			q->qi_ihardlimit = hard;
569 			q->qi_isoftlimit = soft;
570 		}
571 	} else {
572 		xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
573 	}
574 
575 	/*
576 	 * Update warnings counter(s) if requested
577 	 */
578 	if (newlim->d_fieldmask & FS_DQ_BWARNS)
579 		ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
580 	if (newlim->d_fieldmask & FS_DQ_IWARNS)
581 		ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
582 	if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
583 		ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
584 
585 	if (id == 0) {
586 		/*
587 		 * Timelimits for the super user set the relative time
588 		 * the other users can be over quota for this file system.
589 		 * If it is zero a default is used.  Ditto for the default
590 		 * soft and hard limit values (already done, above), and
591 		 * for warnings.
592 		 */
593 		if (newlim->d_fieldmask & FS_DQ_BTIMER) {
594 			q->qi_btimelimit = newlim->d_btimer;
595 			ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
596 		}
597 		if (newlim->d_fieldmask & FS_DQ_ITIMER) {
598 			q->qi_itimelimit = newlim->d_itimer;
599 			ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
600 		}
601 		if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
602 			q->qi_rtbtimelimit = newlim->d_rtbtimer;
603 			ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
604 		}
605 		if (newlim->d_fieldmask & FS_DQ_BWARNS)
606 			q->qi_bwarnlimit = newlim->d_bwarns;
607 		if (newlim->d_fieldmask & FS_DQ_IWARNS)
608 			q->qi_iwarnlimit = newlim->d_iwarns;
609 		if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
610 			q->qi_rtbwarnlimit = newlim->d_rtbwarns;
611 	} else {
612 		/*
613 		 * If the user is now over quota, start the timelimit.
614 		 * The user will not be 'warned'.
615 		 * Note that we keep the timers ticking, whether enforcement
616 		 * is on or off. We don't really want to bother with iterating
617 		 * over all ondisk dquots and turning the timers on/off.
618 		 */
619 		xfs_qm_adjust_dqtimers(mp, ddq);
620 	}
621 	dqp->dq_flags |= XFS_DQ_DIRTY;
622 	xfs_trans_log_dquot(tp, dqp);
623 
624 	error = xfs_trans_commit(tp, 0);
625 	xfs_qm_dqprint(dqp);
626 	xfs_qm_dqrele(dqp);
627 
628  out_unlock:
629 	mutex_unlock(&q->qi_quotaofflock);
630 	return error;
631 }
632 
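/*
 * Return the ondisk quota limits and usage for a single id in the
 * exportable fs_disk_quota format.  Returns ENOENT if the dquot does not
 * exist or has never been initialized.
 */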
633 int
634 xfs_qm_scall_getquota(
635 	xfs_mount_t	*mp,
636 	xfs_dqid_t	id,
637 	uint		type,
638 	fs_disk_quota_t *out)
639 {
640 	xfs_dquot_t	*dqp;
641 	int		error;
642 
643 	/*
644 	 * Try to get the dquot. We don't want it allocated on disk, so
645 	 * we aren't passing the XFS_QMOPT_DQALLOC flag. If it doesn't
646 	 * exist, we'll get ENOENT back.
647 	 */
648 	if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
649 		return (error);
650 	}
651 
652 	/*
653 	 * If everything's NULL, this dquot doesn't quite exist as far as
654 	 * our utility programs are concerned.
655 	 */
656 	if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
657 		xfs_qm_dqput(dqp);
658 		return XFS_ERROR(ENOENT);
659 	}
660 	/* xfs_qm_dqprint(dqp); */
661 	/*
662 	 * Convert the disk dquot to the exportable format
663 	 */
664 	xfs_qm_export_dquot(mp, &dqp->q_core, out);
665 	xfs_qm_dqput(dqp);
666 	return (error ? XFS_ERROR(EFAULT) : 0);
667 }
668 
669 
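/*
 * Log the QUOTAOFF end record in its own synchronous transaction.  Once
 * this record is safely on disk, the matching start record may be
 * overwritten in the log without risking replay of stale dquot changes.
 */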
670 STATIC int
671 xfs_qm_log_quotaoff_end(
672 	xfs_mount_t		*mp,
673 	xfs_qoff_logitem_t	*startqoff,
674 	uint			flags)
675 {
676 	xfs_trans_t		*tp;
677 	int			error;
678 	xfs_qoff_logitem_t	*qoffi;
679 
680 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);
681 
682 	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
683 				      0, 0, XFS_DEFAULT_LOG_COUNT))) {
684 		xfs_trans_cancel(tp, 0);
685 		return (error);
686 	}
687 
688 	qoffi = xfs_trans_get_qoff_item(tp, startqoff,
689 					flags & XFS_ALL_QUOTA_ACCT);
690 	xfs_trans_log_quotaoff_item(tp, qoffi);
691 
692 	/*
693 	 * We have to make sure that the transaction is secure on disk before we
694 	 * return and actually stop quota accounting. So, make it synchronous.
695 	 * We don't care about quotaoff's performance.
696 	 */
697 	xfs_trans_set_sync(tp);
698 	error = xfs_trans_commit(tp, 0);
699 	return (error);
700 }
701 
702 
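/*
 * Log the QUOTAOFF start record and the superblock quota-flag change in
 * one synchronous transaction.  On success the start log item is handed
 * back through @qoffstartp so the caller can log the matching end record;
 * on failure the incore sb_qflags are restored.
 */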
703 STATIC int
704 xfs_qm_log_quotaoff(
705 	xfs_mount_t	       *mp,
706 	xfs_qoff_logitem_t     **qoffstartp,
707 	uint		       flags)
708 {
709 	xfs_trans_t	       *tp;
710 	int			error;
711 	xfs_qoff_logitem_t     *qoffi=NULL;
712 	uint			oldsbqflag=0;
713 
714 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
715 	if ((error = xfs_trans_reserve(tp, 0,
716 				      sizeof(xfs_qoff_logitem_t) * 2 +
717 				      mp->m_sb.sb_sectsize + 128,
718 				      0,
719 				      0,
720 				      XFS_DEFAULT_LOG_COUNT))) {
721 		goto error0;
722 	}
723 
724 	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
725 	xfs_trans_log_quotaoff_item(tp, qoffi);
726 
727 	spin_lock(&mp->m_sb_lock);
728 	oldsbqflag = mp->m_sb.sb_qflags;
729 	mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
730 	spin_unlock(&mp->m_sb_lock);
731 
732 	xfs_mod_sb(tp, XFS_SB_QFLAGS);
733 
734 	/*
735 	 * We have to make sure that the transaction is secure on disk before we
736 	 * return and actually stop quota accounting. So, make it synchronous.
737 	 * We don't care about quotaoff's performance.
738 	 */
739 	xfs_trans_set_sync(tp);
740 	error = xfs_trans_commit(tp, 0);
741 
742 error0:
743 	if (error) {
744 		xfs_trans_cancel(tp, 0);
745 		/*
746 		 * No one else is modifying sb_qflags, so this is OK.
747 		 * We still hold the quotaofflock.
748 		 */
749 		spin_lock(&mp->m_sb_lock);
750 		mp->m_sb.sb_qflags = oldsbqflag;
751 		spin_unlock(&mp->m_sb_lock);
752 	}
753 	*qoffstartp = qoffi;
754 	return (error);
755 }
756 
757 
758 /*
759  * Translate an internal style on-disk-dquot to the exportable format.
760  * The main differences are that the counters/limits are all in Basic
761  * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
762  * to be converted to the native endianness.
763  */
764 STATIC void
765 xfs_qm_export_dquot(
766 	xfs_mount_t		*mp,
767 	xfs_disk_dquot_t	*src,
768 	struct fs_disk_quota	*dst)
769 {
770 	memset(dst, 0, sizeof(*dst));
771 	dst->d_version = FS_DQUOT_VERSION;  /* different from src->d_version */
772 	dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags);
773 	dst->d_id = be32_to_cpu(src->d_id);
774 	dst->d_blk_hardlimit =
775 		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit));
776 	dst->d_blk_softlimit =
777 		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit));
778 	dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit);
779 	dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit);
780 	dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount));
781 	dst->d_icount = be64_to_cpu(src->d_icount);
782 	dst->d_btimer = be32_to_cpu(src->d_btimer);
783 	dst->d_itimer = be32_to_cpu(src->d_itimer);
784 	dst->d_iwarns = be16_to_cpu(src->d_iwarns);
785 	dst->d_bwarns = be16_to_cpu(src->d_bwarns);
786 	dst->d_rtb_hardlimit =
787 		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit));
788 	dst->d_rtb_softlimit =
789 		XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit));
790 	dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount));
791 	dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer);
792 	dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns);
793 
794 	/*
795 	 * Internally, we don't reset all the timers when quota enforcement
796 	 * gets turned off. No need to confuse the user level code,
797 	 * so return zeroes in that case.
798 	 */
799 	if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) ||
800 	    (!XFS_IS_OQUOTA_ENFORCED(mp) &&
801 			(src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
802 		dst->d_btimer = 0;
803 		dst->d_itimer = 0;
804 		dst->d_rtbtimer = 0;
805 	}
806 
807 #ifdef DEBUG
808 	if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
809 	     (XFS_IS_OQUOTA_ENFORCED(mp) &&
810 			(dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
811 	    dst->d_id != 0) {
812 		if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
813 		    (dst->d_blk_softlimit > 0)) {
814 			ASSERT(dst->d_btimer != 0);
815 		}
816 		if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
817 		    (dst->d_ino_softlimit > 0)) {
818 			ASSERT(dst->d_itimer != 0);
819 		}
820 	}
821 #endif
822 }
823 
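/*
 * Translate an internal quota type (XFS_DQ_*) into the single FS_*_QUOTA
 * flag used by the exportable interface.
 */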
824 STATIC uint
825 xfs_qm_export_qtype_flags(
826 	uint flags)
827 {
828 	/*
829 	 * Exactly one quota type flag must be set; not more than one, and not none.
830 	 */
831 	ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
832 		(FS_PROJ_QUOTA | FS_USER_QUOTA));
833 	ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
834 		(FS_PROJ_QUOTA | FS_GROUP_QUOTA));
835 	ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
836 		(FS_USER_QUOTA | FS_GROUP_QUOTA));
837 	ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
838 
839 	return (flags & XFS_DQ_USER) ?
840 		FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
841 			FS_PROJ_QUOTA : FS_GROUP_QUOTA;
842 }
843 
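/*
 * Translate internal quota mount flags (XFS_*QUOTA_ACCT/ENFD) into the
 * FS_QUOTA_* flags reported to user space.
 */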
844 STATIC uint
845 xfs_qm_export_flags(
846 	uint flags)
847 {
848 	uint uflags;
849 
850 	uflags = 0;
851 	if (flags & XFS_UQUOTA_ACCT)
852 		uflags |= FS_QUOTA_UDQ_ACCT;
853 	if (flags & XFS_PQUOTA_ACCT)
854 		uflags |= FS_QUOTA_PDQ_ACCT;
855 	if (flags & XFS_GQUOTA_ACCT)
856 		uflags |= FS_QUOTA_GDQ_ACCT;
857 	if (flags & XFS_UQUOTA_ENFD)
858 		uflags |= FS_QUOTA_UDQ_ENFD;
859 	if (flags & (XFS_OQUOTA_ENFD)) {
860 		uflags |= (flags & XFS_GQUOTA_ACCT) ?
861 			FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
862 	}
863 	return (uflags);
864 }
865 
866 
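/*
 * Drop the dquot references held by a single inode.  Invoked for every
 * incore inode by xfs_qm_dqrele_all_inodes(); the quota inodes themselves
 * are skipped because they never carry dquot references.
 */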
867 STATIC int
868 xfs_dqrele_inode(
869 	struct xfs_inode	*ip,
870 	struct xfs_perag	*pag,
871 	int			flags)
872 {
873 	/* skip quota inodes */
874 	if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
875 	    ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
876 		ASSERT(ip->i_udquot == NULL);
877 		ASSERT(ip->i_gdquot == NULL);
878 		return 0;
879 	}
880 
881 	xfs_ilock(ip, XFS_ILOCK_EXCL);
882 	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
883 		xfs_qm_dqrele(ip->i_udquot);
884 		ip->i_udquot = NULL;
885 	}
886 	if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
887 		xfs_qm_dqrele(ip->i_gdquot);
888 		ip->i_gdquot = NULL;
889 	}
890 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
891 	return 0;
892 }
893 
894 
895 /*
896  * Go thru all the inodes in the file system, releasing their dquots.
897  *
898  * Note that the mount structure gets modified to indicate that quotas are off
899  * AFTER this, in the case of quotaoff.
900  */
901 void
902 xfs_qm_dqrele_all_inodes(
903 	struct xfs_mount *mp,
904 	uint		 flags)
905 {
906 	ASSERT(mp->m_quotainfo);
907 	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
908 }
909 
910 /*------------------------------------------------------------------------*/
911 #ifdef DEBUG
912 /*
913  * This contains all the test functions for XFS disk quotas.
914  * Currently it does a quota accounting check, i.e. it walks through
915  * all inodes in the file system, calculating the dquot accounting fields,
916  * and prints out any inconsistencies.
917  */
918 xfs_dqhash_t *qmtest_udqtab;
919 xfs_dqhash_t *qmtest_gdqtab;
920 int	      qmtest_hashmask;
921 int	      qmtest_nfails;
922 struct mutex  qcheck_lock;
923 
924 #define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
925 				 (__psunsigned_t)(id)) & \
926 				(qmtest_hashmask - 1))
927 
928 #define DQTEST_HASH(mp, id, type)   ((type & XFS_DQ_USER) ? \
929 				     (qmtest_udqtab + \
930 				      DQTEST_HASHVAL(mp, id)) : \
931 				     (qmtest_gdqtab + \
932 				      DQTEST_HASHVAL(mp, id)))
933 
934 #define DQTEST_LIST_PRINT(l, NXT, title) \
935 { \
936 	  xfs_dqtest_t	*dqp; int i = 0;\
937 	  xfs_debug(NULL, "%s (#%d)", title, (int) (l)->qh_nelems); \
938 	  for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \
939 	       dqp = (xfs_dqtest_t *)dqp->NXT) { \
940 		xfs_debug(dqp->q_mount,		\
941 			"  %d. \"%d (%s)\"  bcnt = %d, icnt = %d", \
942 			 ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp),	     \
943 			 dqp->d_bcount, dqp->d_icount); } \
944 }
945 
946 typedef struct dqtest {
947 	uint		 dq_flags;	/* various flags (XFS_DQ_*) */
948 	struct list_head q_hashlist;
949 	xfs_dqhash_t	*q_hash;	/* the hashchain header */
950 	xfs_mount_t	*q_mount;	/* filesystem this relates to */
951 	xfs_dqid_t	d_id;		/* user id or group id */
952 	xfs_qcnt_t	d_bcount;	/* # disk blocks owned by the user */
953 	xfs_qcnt_t	d_icount;	/* # inodes owned by the user */
954 } xfs_dqtest_t;
955 
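/* Insert a test dquot into its hash chain and bump the chain counters. */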
956 STATIC void
957 xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
958 {
959 	list_add(&dqp->q_hashlist, &h->qh_list);
960 	h->qh_version++;
961 	h->qh_nelems++;
962 }
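/* Dump the id and counters of a test dquot for debugging output. */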
963 STATIC void
964 xfs_qm_dqtest_print(
965 	struct xfs_mount	*mp,
966 	struct dqtest		*d)
967 {
968 	xfs_debug(mp, "-----------DQTEST DQUOT----------------");
969 	xfs_debug(mp, "---- dquot ID = %d", d->d_id);
970 	xfs_debug(mp, "---- fs       = 0x%p", d->q_mount);
971 	xfs_debug(mp, "---- bcount   = %Lu (0x%x)",
972 		d->d_bcount, (int)d->d_bcount);
973 	xfs_debug(mp, "---- icount   = %Lu (0x%x)",
974 		d->d_icount, (int)d->d_icount);
975 	xfs_debug(mp, "---------------------------");
976 }
977 
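/*
 * Record a quotacheck mismatch: bump the failure counter and print the
 * offending test dquot along with the real dquot, if one is available.
 */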
978 STATIC void
979 xfs_qm_dqtest_failed(
980 	xfs_dqtest_t	*d,
981 	xfs_dquot_t	*dqp,
982 	char		*reason,
983 	xfs_qcnt_t	a,
984 	xfs_qcnt_t	b,
985 	int		error)
986 {
987 	qmtest_nfails++;
988 	if (error)
989 		xfs_debug(dqp->q_mount,
990 			"quotacheck failed id=%d, err=%d\nreason: %s",
991 			d->d_id, error, reason);
992 	else
993 		xfs_debug(dqp->q_mount,
994 			"quotacheck failed id=%d (%s) [%d != %d]",
995 			d->d_id, reason, (int)a, (int)b);
996 	xfs_qm_dqtest_print(dqp->q_mount, d);
997 	if (dqp)
998 		xfs_qm_dqprint(dqp);
999 }
1000 
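/*
 * Compare a test dquot against the corresponding real dquot: inode and
 * block counts must match, and the block/inode timers must be running
 * whenever a soft limit has been exceeded.  Returns the number of
 * mismatches found.
 */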
1001 STATIC int
1002 xfs_dqtest_cmp2(
1003 	xfs_dqtest_t	*d,
1004 	xfs_dquot_t	*dqp)
1005 {
1006 	int err = 0;
1007 	if (be64_to_cpu(dqp->q_core.d_icount) != d->d_icount) {
1008 		xfs_qm_dqtest_failed(d, dqp, "icount mismatch",
1009 			be64_to_cpu(dqp->q_core.d_icount),
1010 			d->d_icount, 0);
1011 		err++;
1012 	}
1013 	if (be64_to_cpu(dqp->q_core.d_bcount) != d->d_bcount) {
1014 		xfs_qm_dqtest_failed(d, dqp, "bcount mismatch",
1015 			be64_to_cpu(dqp->q_core.d_bcount),
1016 			d->d_bcount, 0);
1017 		err++;
1018 	}
1019 	if (dqp->q_core.d_blk_softlimit &&
1020 	    be64_to_cpu(dqp->q_core.d_bcount) >=
1021 	    be64_to_cpu(dqp->q_core.d_blk_softlimit)) {
1022 		if (!dqp->q_core.d_btimer && dqp->q_core.d_id) {
1023 			xfs_debug(dqp->q_mount,
1024 				"%d [%s] BLK TIMER NOT STARTED",
1025 				d->d_id, DQFLAGTO_TYPESTR(d));
1026 			err++;
1027 		}
1028 	}
1029 	if (dqp->q_core.d_ino_softlimit &&
1030 	    be64_to_cpu(dqp->q_core.d_icount) >=
1031 	    be64_to_cpu(dqp->q_core.d_ino_softlimit)) {
1032 		if (!dqp->q_core.d_itimer && dqp->q_core.d_id) {
1033 			xfs_debug(dqp->q_mount,
1034 				"%d [%s] INO TIMER NOT STARTED",
1035 				d->d_id, DQFLAGTO_TYPESTR(d));
1036 			err++;
1037 		}
1038 	}
1039 #ifdef QUOTADEBUG
1040 	if (!err) {
1041 		xfs_debug(dqp->q_mount, "%d [%s] qchecked",
1042 			d->d_id, DQFLAGTO_TYPESTR(d));
1043 	}
1044 #endif
1045 	return (err);
1046 }
1047 
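/* Look up the real dquot for a test dquot and compare the two. */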
1048 STATIC void
1049 xfs_dqtest_cmp(
1050 	xfs_dqtest_t	*d)
1051 {
1052 	xfs_dquot_t	*dqp;
1053 	int		error;
1054 
1055 	/* xfs_qm_dqtest_print(d); */
1056 	if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0,
1057 				 &dqp))) {
1058 		xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error);
1059 		return;
1060 	}
1061 	xfs_dqtest_cmp2(d, dqp);
1062 	xfs_qm_dqput(dqp);
1063 }
1064 
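/*
 * Find the test dquot for (mp, id, type) in the private hash table,
 * allocating and hashing a new zeroed entry if none exists yet.
 */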
1065 STATIC int
1066 xfs_qm_internalqcheck_dqget(
1067 	xfs_mount_t	*mp,
1068 	xfs_dqid_t	id,
1069 	uint		type,
1070 	xfs_dqtest_t	**O_dq)
1071 {
1072 	xfs_dqtest_t	*d;
1073 	xfs_dqhash_t	*h;
1074 
1075 	h = DQTEST_HASH(mp, id, type);
1076 	list_for_each_entry(d, &h->qh_list, q_hashlist) {
1077 		if (d->d_id == id && mp == d->q_mount) {
1078 			*O_dq = d;
1079 			return (0);
1080 		}
1081 	}
1082 	d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP);
1083 	d->dq_flags = type;
1084 	d->d_id = id;
1085 	d->q_mount = mp;
1086 	d->q_hash = h;
1087 	INIT_LIST_HEAD(&d->q_hashlist);
1088 	xfs_qm_hashinsert(h, d);
1089 	*O_dq = d;
1090 	return (0);
1091 }
1092 
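/*
 * Fetch the test dquots for an inode's user id and for its group or
 * project id, depending on which quota types are enabled.
 */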
1093 STATIC void
1094 xfs_qm_internalqcheck_get_dquots(
1095 	xfs_mount_t	*mp,
1096 	xfs_dqid_t	uid,
1097 	xfs_dqid_t	projid,
1098 	xfs_dqid_t	gid,
1099 	xfs_dqtest_t	**ud,
1100 	xfs_dqtest_t	**gd)
1101 {
1102 	if (XFS_IS_UQUOTA_ON(mp))
1103 		xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud);
1104 	if (XFS_IS_GQUOTA_ON(mp))
1105 		xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd);
1106 	else if (XFS_IS_PQUOTA_ON(mp))
1107 		xfs_qm_internalqcheck_dqget(mp, projid, XFS_DQ_PROJ, gd);
1108 }
1109 
1110 
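/* Charge one inode and its data blocks to the given test dquot. */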
1111 STATIC void
1112 xfs_qm_internalqcheck_dqadjust(
1113 	xfs_inode_t		*ip,
1114 	xfs_dqtest_t		*d)
1115 {
1116 	d->d_icount++;
1117 	d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks;
1118 }
1119 
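/*
 * Bulkstat callback: account a single inode to its user and group/project
 * test dquots.  The inode is grabbed, released and grabbed again so that
 * blocks past EOF can be freed by inactivation before di_nblocks is
 * sampled.
 */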
1120 STATIC int
1121 xfs_qm_internalqcheck_adjust(
1122 	xfs_mount_t	*mp,		/* mount point for filesystem */
1123 	xfs_ino_t	ino,		/* inode number to get data for */
1124 	void		__user *buffer,	/* not used */
1125 	int		ubsize,		/* not used */
1126 	int		*ubused,	/* not used */
1127 	int		*res)		/* bulkstat result code */
1128 {
1129 	xfs_inode_t		*ip;
1130 	xfs_dqtest_t		*ud, *gd;
1131 	uint			lock_flags;
1132 	boolean_t		ipreleased;
1133 	int			error;
1134 
1135 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1136 
1137 	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
1138 		*res = BULKSTAT_RV_NOTHING;
1139 		xfs_debug(mp, "%s: ino=%llu, uqino=%llu, gqino=%llu\n",
1140 			__func__, (unsigned long long) ino,
1141 			(unsigned long long) mp->m_sb.sb_uquotino,
1142 			(unsigned long long) mp->m_sb.sb_gquotino);
1143 		return XFS_ERROR(EINVAL);
1144 	}
1145 	ipreleased = B_FALSE;
1146  again:
1147 	lock_flags = XFS_ILOCK_SHARED;
1148 	if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
1149 		*res = BULKSTAT_RV_NOTHING;
1150 		return (error);
1151 	}
1152 
1153 	/*
1154 	 * This inode can have blocks after eof which can get released
1155 	 * when we send it to inactive. Since we don't check the dquot
1156 	 * until after all our calculations are done, we must get rid
1157 	 * of those now.
1158 	 */
1159 	if (! ipreleased) {
1160 		xfs_iunlock(ip, lock_flags);
1161 		IRELE(ip);
1162 		ipreleased = B_TRUE;
1163 		goto again;
1164 	}
1165 	xfs_qm_internalqcheck_get_dquots(mp,
1166 					(xfs_dqid_t) ip->i_d.di_uid,
1167 					(xfs_dqid_t) xfs_get_projid(ip),
1168 					(xfs_dqid_t) ip->i_d.di_gid,
1169 					&ud, &gd);
1170 	if (XFS_IS_UQUOTA_ON(mp)) {
1171 		ASSERT(ud);
1172 		xfs_qm_internalqcheck_dqadjust(ip, ud);
1173 	}
1174 	if (XFS_IS_OQUOTA_ON(mp)) {
1175 		ASSERT(gd);
1176 		xfs_qm_internalqcheck_dqadjust(ip, gd);
1177 	}
1178 	xfs_iunlock(ip, lock_flags);
1179 	IRELE(ip);
1180 	*res = BULKSTAT_RV_DIDONE;
1181 	return (0);
1182 }
1183 
1184 
1185 /* PRIVATE, debugging */
1186 int
1187 xfs_qm_internalqcheck(
1188 	xfs_mount_t	*mp)
1189 {
1190 	xfs_ino_t	lastino;
1191 	int		done, count;
1192 	int		i;
1193 	int		error;
1194 
1195 	lastino = 0;
1196 	qmtest_hashmask = 32;
1197 	count = 5;
1198 	done = 0;
1199 	qmtest_nfails = 0;
1200 
1201 	if (! XFS_IS_QUOTA_ON(mp))
1202 		return XFS_ERROR(ESRCH);
1203 
1204 	xfs_log_force(mp, XFS_LOG_SYNC);
1205 	XFS_bflush(mp->m_ddev_targp);
1206 	xfs_log_force(mp, XFS_LOG_SYNC);
1207 	XFS_bflush(mp->m_ddev_targp);
1208 
1209 	mutex_lock(&qcheck_lock);
1210 	/* There should be absolutely no quota activity while this
1211 	   is going on. */
1212 	qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
1213 				    sizeof(xfs_dqhash_t), KM_SLEEP);
1214 	qmtest_gdqtab = kmem_zalloc(qmtest_hashmask *
1215 				    sizeof(xfs_dqhash_t), KM_SLEEP);
1216 	do {
1217 		/*
1218 		 * Iterate thru all the inodes in the file system,
1219 		 * adjusting the corresponding dquot counters
1220 		 */
1221 		error = xfs_bulkstat(mp, &lastino, &count,
1222 				 xfs_qm_internalqcheck_adjust,
1223 				 0, NULL, &done);
1224 		if (error) {
1225 			xfs_debug(mp, "Bulkstat returned error 0x%x", error);
1226 			break;
1227 		}
1228 	} while (!done);
1229 
1230 	xfs_debug(mp, "Checking results against system dquots");
1231 	for (i = 0; i < qmtest_hashmask; i++) {
1232 		xfs_dqtest_t	*d, *n;
1233 		xfs_dqhash_t	*h;
1234 
1235 		h = &qmtest_udqtab[i];
1236 		list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
1237 			xfs_dqtest_cmp(d);
1238 			kmem_free(d);
1239 		}
1240 		h = &qmtest_gdqtab[i];
1241 		list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
1242 			xfs_dqtest_cmp(d);
1243 			kmem_free(d);
1244 		}
1245 	}
1246 
1247 	if (qmtest_nfails) {
1248 		xfs_debug(mp, "******** quotacheck failed  ********");
1249 		xfs_debug(mp, "failures = %d", qmtest_nfails);
1250 	} else {
1251 		xfs_debug(mp, "******** quotacheck successful! ********");
1252 	}
1253 	kmem_free(qmtest_udqtab);
1254 	kmem_free(qmtest_gdqtab);
1255 	mutex_unlock(&qcheck_lock);
1256 	return (qmtest_nfails);
1257 }
1258 
1259 #endif /* DEBUG */
1260