// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_DQUOT_H__
#define __XFS_DQUOT_H__

/*
 * Dquots are structures that hold quota information about a user or a group,
 * much like inodes are for files. In fact, dquots share many characteristics
 * with inodes. However, dquots can also be a centralized resource, relative
 * to a collection of inodes. In this respect, dquots share some characteristics
 * of the superblock.
 * XFS dquots exploit both of these in their algorithms. They make every
 * attempt not to be a bottleneck when quotas are on and to have minimal
 * impact, if any, when quotas are off.
 */

struct xfs_mount;
struct xfs_trans;

enum {
	XFS_QLOWSP_1_PCNT = 0,
	XFS_QLOWSP_3_PCNT,
	XFS_QLOWSP_5_PCNT,
	XFS_QLOWSP_MAX
};

struct xfs_dquot_res {
	/* Total resources allocated and reserved. */
	xfs_qcnt_t		reserved;

	/* Total resources allocated. */
	xfs_qcnt_t		count;

	/* Absolute and preferred limits. */
	xfs_qcnt_t		hardlimit;
	xfs_qcnt_t		softlimit;

	/*
	 * For root dquots, this is the default grace period, in seconds.
	 * Otherwise, this is when the quota grace period expires,
	 * in seconds since the Unix epoch.
	 */
	time64_t		timer;
};
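
/*
 * Worked example (illustrative values only, not taken from the code): the
 * root dquot (id 0) carries the default grace period as a length, e.g.
 * 604800 for seven days, while an ordinary dquot that has exceeded its soft
 * limit stores the absolute expiry time, i.e. "now" plus that grace period,
 * in seconds since the Unix epoch.
 */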

static inline bool
xfs_dquot_res_over_limits(
	const struct xfs_dquot_res	*qres)
{
	if ((qres->softlimit && qres->softlimit < qres->reserved) ||
	    (qres->hardlimit && qres->hardlimit < qres->reserved))
		return true;
	return false;
}

/*
 * The incore dquot structure
 */
struct xfs_dquot {
	struct list_head	q_lru;		/* LRU list linkage for reclaim */
	struct xfs_mount	*q_mount;	/* filesystem this dquot lives in */
	xfs_dqtype_t		q_type;		/* XFS_DQTYPE_USER/GROUP/PROJ */
	uint16_t		q_flags;	/* XFS_DQFLAG_* state flags */
	xfs_dqid_t		q_id;		/* user, group or project id */
	uint			q_nrefs;	/* reference count */
	int			q_bufoffset;	/* byte offset in the dquot buffer */
	xfs_daddr_t		q_blkno;	/* disk address of the dquot block */
	xfs_fileoff_t		q_fileoffset;	/* offset into the quota file */

	struct xfs_dquot_res	q_blk;	/* regular blocks */
	struct xfs_dquot_res	q_ino;	/* inodes */
	struct xfs_dquot_res	q_rtb;	/* realtime blocks */

	struct xfs_dq_logitem	q_logitem;	/* dquot log item */

	xfs_qcnt_t		q_prealloc_lo_wmark;	/* prealloc throttle low watermark */
	xfs_qcnt_t		q_prealloc_hi_wmark;	/* prealloc throttle high watermark */
	int64_t			q_low_space[XFS_QLOWSP_MAX]; /* low free space thresholds */
	struct mutex		q_qlock;	/* dquot lock, see xfs_dqlock() */
	struct completion	q_flush;	/* flush lock, see xfs_dqflock() */
	atomic_t		q_pincount;	/* log pin count */
	struct wait_queue_head	q_pinwait;	/* waiters for the dquot to be unpinned */
};

/*
 * Lock hierarchy for q_qlock:
 * XFS_QLOCK_NORMAL is the implicit default,
 * XFS_QLOCK_NESTED is the dquot with the higher id in xfs_dqlock2
 */
enum {
	XFS_QLOCK_NORMAL = 0,
	XFS_QLOCK_NESTED,
};
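
/*
 * Sketch of the expected locking order (assumed, not the actual body of
 * xfs_dqlock2() in xfs_dquot.c): lock the lower-id dquot normally and take
 * the higher-id dquot with the nested subclass so lockdep accepts holding
 * both q_qlock mutexes at once. Assuming d2->q_id > d1->q_id:
 *
 *	mutex_lock(&d1->q_qlock);
 *	mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
 */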

/*
 * Manage the q_flush completion queue embedded in the dquot. This completion
 * queue synchronizes processes attempting to flush the in-core dquot back to
 * disk.
 */
static inline void xfs_dqflock(struct xfs_dquot *dqp)
{
	wait_for_completion(&dqp->q_flush);
}

static inline bool xfs_dqflock_nowait(struct xfs_dquot *dqp)
{
	return try_wait_for_completion(&dqp->q_flush);
}

static inline void xfs_dqfunlock(struct xfs_dquot *dqp)
{
	complete(&dqp->q_flush);
}
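
/*
 * Usage sketch (an assumed caller pattern, not code from xfs_dquot.c): only
 * one holder owns the flush lock at a time, so a non-blocking flusher pairs
 * a successful xfs_dqflock_nowait() with a later xfs_dqfunlock():
 *
 *	if (xfs_dqflock_nowait(dqp)) {
 *		... flush the dquot to its buffer here ...
 *		xfs_dqfunlock(dqp);
 *	}
 */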

static inline int xfs_dqlock_nowait(struct xfs_dquot *dqp)
{
	return mutex_trylock(&dqp->q_qlock);
}

static inline void xfs_dqlock(struct xfs_dquot *dqp)
{
	mutex_lock(&dqp->q_qlock);
}

static inline void xfs_dqunlock(struct xfs_dquot *dqp)
{
	mutex_unlock(&dqp->q_qlock);
}

static inline int
xfs_dquot_type(const struct xfs_dquot *dqp)
{
	return dqp->q_type & XFS_DQTYPE_REC_MASK;
}

static inline int xfs_this_quota_on(struct xfs_mount *mp, xfs_dqtype_t type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return XFS_IS_UQUOTA_ON(mp);
	case XFS_DQTYPE_GROUP:
		return XFS_IS_GQUOTA_ON(mp);
	case XFS_DQTYPE_PROJ:
		return XFS_IS_PQUOTA_ON(mp);
	default:
		return 0;
	}
}

static inline struct xfs_dquot *xfs_inode_dquot(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return ip->i_udquot;
	case XFS_DQTYPE_GROUP:
		return ip->i_gdquot;
	case XFS_DQTYPE_PROJ:
		return ip->i_pdquot;
	default:
		return NULL;
	}
}

/* Decide if the dquot's limits are actually being enforced. */
static inline bool
xfs_dquot_is_enforced(
	const struct xfs_dquot	*dqp)
{
	switch (xfs_dquot_type(dqp)) {
	case XFS_DQTYPE_USER:
		return XFS_IS_UQUOTA_ENFORCED(dqp->q_mount);
	case XFS_DQTYPE_GROUP:
		return XFS_IS_GQUOTA_ENFORCED(dqp->q_mount);
	case XFS_DQTYPE_PROJ:
		return XFS_IS_PQUOTA_ENFORCED(dqp->q_mount);
	}
	ASSERT(0);
	return false;
}

/*
 * Check whether a dquot is under low free space conditions. We assume the
 * quota is enabled and enforced.
 */
static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp)
{
	int64_t freesp;

	freesp = dqp->q_blk.hardlimit - dqp->q_blk.reserved;
	if (freesp < dqp->q_low_space[XFS_QLOWSP_1_PCNT])
		return true;

	return false;
}

void		xfs_dquot_to_disk(struct xfs_disk_dquot *ddqp, struct xfs_dquot *dqp);

#define XFS_DQ_IS_LOCKED(dqp)	(mutex_is_locked(&((dqp)->q_qlock)))
#define XFS_DQ_IS_DIRTY(dqp)	((dqp)->q_flags & XFS_DQFLAG_DIRTY)

void		xfs_qm_dqdestroy(struct xfs_dquot *dqp);
int		xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp);
void		xfs_qm_dqunpin_wait(struct xfs_dquot *dqp);
void		xfs_qm_adjust_dqtimers(struct xfs_dquot *d);
void		xfs_qm_adjust_dqlimits(struct xfs_dquot *d);
xfs_dqid_t	xfs_qm_id_for_quotatype(struct xfs_inode *ip,
				xfs_dqtype_t type);
int		xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id,
				xfs_dqtype_t type, bool can_alloc,
				struct xfs_dquot **dqpp);
int		xfs_qm_dqget_inode(struct xfs_inode *ip, xfs_dqtype_t type,
				bool can_alloc, struct xfs_dquot **dqpp);
int		xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id,
				xfs_dqtype_t type, struct xfs_dquot **dqpp);
int		xfs_qm_dqget_uncached(struct xfs_mount *mp,
				xfs_dqid_t id, xfs_dqtype_t type,
				struct xfs_dquot **dqpp);
void		xfs_qm_dqput(struct xfs_dquot *dqp);
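
/*
 * Usage sketch (assumed lookup pattern, for illustration only): a successful
 * xfs_qm_dqget() is expected to hand back a referenced, locked dquot, and
 * xfs_qm_dqput() later drops that reference along with the dquot lock:
 *
 *	struct xfs_dquot	*dqp;
 *	int			error;
 *
 *	error = xfs_qm_dqget(mp, id, XFS_DQTYPE_USER, true, &dqp);
 *	if (error)
 *		return error;
 *	... examine or adjust the locked dquot ...
 *	xfs_qm_dqput(dqp);
 */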

void		xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *);

void		xfs_dquot_set_prealloc_limits(struct xfs_dquot *);

static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp)
{
	xfs_dqlock(dqp);
	dqp->q_nrefs++;
	xfs_dqunlock(dqp);
	return dqp;
}

typedef int (*xfs_qm_dqiterate_fn)(struct xfs_dquot *dq,
		xfs_dqtype_t type, void *priv);
int xfs_qm_dqiterate(struct xfs_mount *mp, xfs_dqtype_t type,
		xfs_qm_dqiterate_fn iter_fn, void *priv);
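
/*
 * Callback sketch (xfs_example_count_dquots is a hypothetical helper): each
 * dquot of the requested type is passed to the callback in turn, and a
 * nonzero return value is assumed to stop the walk:
 *
 *	static int
 *	xfs_example_count_dquots(struct xfs_dquot *dq, xfs_dqtype_t type,
 *			void *priv)
 *	{
 *		(*(uint64_t *)priv)++;
 *		return 0;
 *	}
 *
 *	error = xfs_qm_dqiterate(mp, XFS_DQTYPE_USER,
 *			xfs_example_count_dquots, &count);
 */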

time64_t xfs_dquot_set_timeout(struct xfs_mount *mp, time64_t timeout);
time64_t xfs_dquot_set_grace_period(time64_t grace);

#endif /* __XFS_DQUOT_H__ */