/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
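 *
 * As a worked example of the scaling done in need_sync() below: with a
 * hard limit of 1000 blocks, a last-synced value of 900 blocks, a local
 * unsynced change of +30 blocks, four journals (nodes) and the default
 * scale of 1/1, the projected value is 900 + 30 * 4 * 1/1 = 1020.  That
 * is over the limit, so the change is synced immediately instead of
 * waiting for the quota_quantum timeout.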
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not being constantly read.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

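/*
 * gfs2_shrink_qd_memory - Shrinker callback for the quota data cache
 *
 * Frees unreferenced gfs2_quota_data objects from the global LRU list,
 * up to sc->nr_to_scan of them, and reports the remaining LRU size
 * scaled by sysctl_vfs_cache_pressure so the VM can gauge reclaim
 * progress.  Returns -1 if called without __GFP_FS, since reclaiming
 * here may recurse into the filesystem.
 */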
int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;
	int nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan == 0)
		goto out;

	if (!(sc->gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr_to_scan && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr_to_scan--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

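/*
 * qd2offset - Compute a quota ID's byte offset in the quota file
 *
 * User and group records are interleaved: the user quota for ID n lives
 * at index 2n and the group quota for ID n at index 2n + 1, each entry
 * being sizeof(struct gfs2_quota) bytes.
 */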
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

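/*
 * qd_get - Find or create the quota data for a given ID
 *
 * Looks the ID up in the superblock's quota list under qd_lru_lock,
 * taking a reference (and rescuing the entry from the reclaim list if
 * necessary).  On a miss it drops the lock, allocates a new entry and
 * retries, so a racing inserter always wins and the loser's spare
 * allocation is freed.
 */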
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}

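/*
 * slot_get - Reserve a slot in the per-node quota change file
 *
 * The first holder scans the quota bitmap for a clear bit (one bit per
 * slot, PAGE_SIZE bytes per chunk); later holders just bump the
 * reference count.  Returns -ENOSPC when every slot is taken.
 */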
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}

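/*
 * bh_get - Read the quota change block backing a slot
 *
 * Translates qd->qd_slot into a block and offset within the quota
 * change file, reads the block, verifies its metadata type, and points
 * qd->qd_bh_qc at the slot's on-disk gfs2_quota_change entry.
 */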
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

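/*
 * qd_fish - Fish out the next quota data element needing a sync
 *
 * Finds the first entry on the superblock's quota list that has a
 * pending change and has not yet been synced in this generation, marks
 * it QDF_LOCKED, snapshots the change to be written, and pins its slot
 * and buffer.  *qdp is left NULL when nothing needs syncing.
 */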
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_qadata *qa = ip->i_qadata;
	struct gfs2_quota_data **qd = qa->qa_qd;
	int error;

	if (gfs2_assert_warn(sdp, !qa->qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
	if (error)
		goto out;
	qa->qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
	if (error)
		goto out;
	qa->qa_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
		if (error)
			goto out;
		qa->qa_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
		if (error)
			goto out;
		qa->qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_qadata *qa = ip->i_qadata;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < qa->qa_qd_num; x++) {
		qdsb_put(qa->qa_qd[x]);
		qa->qa_qd[x] = NULL;
	}
	qa->qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

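/*
 * do_qc - Record a local quota change in the per-node change file
 *
 * Adds "change" to the slot's on-disk tally under a transaction.  When
 * the tally first becomes nonzero the entry gains a reference and a
 * slot hold (QDF_CHANGE); when it returns to zero both are dropped and
 * the on-disk slot entry is cleared.
 */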
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */

static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q, *qp;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	qp = &q;
	qp->qu_value = be64_to_cpu(qp->qu_value);
	qp->qu_value += change;
	qp->qu_value = cpu_to_be64(qp->qu_value);
	qd->qd_qb.qb_value = qp->qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = qp->qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = qp->qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = qp->qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = qp;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

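/*
 * do_sync - Flush a batch of quota changes into the quota file
 *
 * Takes exclusive glocks on every quota involved (in sort_qd order, so
 * all nodes acquire these glocks in the same order), reserves enough
 * blocks for the worst case, then applies each pending change to the
 * quota file and backs it out of the per-node change file via do_qc().
 */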
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			      &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	error = gfs2_inplace_reserve(ip, 1 +
				     (nalloc * (data_blocks + ind_blocks)));
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_qadata *qa = ip->i_qadata;
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(qa->qa_qd, qa->qa_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < qa->qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = qa->qa_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &qa->qa_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

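/*
 * need_sync - Decide whether a local change should be synced early
 *
 * Implements the "quota_scale" heuristic described at the top of this
 * file: the local change is scaled by the number of journals and by
 * quota_scale_num/quota_scale_den, and a sync is requested if the
 * result added to the last-synced value would cross the hard limit.
 * Deallocations (negative changes) never force a sync.
 */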
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_qadata *qa = ip->i_qadata;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < qa->qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = qa->qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&qa->qa_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

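/*
 * gfs2_quota_check - Check whether a pending allocation would overrun quota
 *
 * For each quota locked against the inode that matches @uid or @gid,
 * adds the unsynced local change to the last known value and returns
 * -EDQUOT past the hard limit, or prints and sends a netlink warning
 * (rate-limited by gt_quota_warn_period) past the soft limit.
 */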
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_qadata *qa = ip->i_qadata;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < qa->qa_qd_num; x++) {
		qd = qa->qa_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);

			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						gt_quota_warn_period) * HZ)) {
			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
					   USRQUOTA : GRPQUOTA, qd->qd_id,
					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_qadata *qa = ip->i_qadata;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < qa->qa_qd_num; x++) {
		qd = qa->qa_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

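/*
 * gfs2_quota_sync - Sync all locally cached quota changes to the quota file
 *
 * Bumps the sync generation, then repeatedly fishes out up to
 * gt_quota_simul_sync dirty entries at a time and writes them back with
 * do_sync(), tagging each with the new generation so it is not synced
 * twice.
 */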
int gfs2_quota_sync(struct super_block *sb, int type, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

static int gfs2_quota_sync_timeo(struct super_block *sb, int type)
{
	return gfs2_quota_sync(sb, type, 0);
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

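/*
 * gfs2_quota_init - Rebuild in-core quota state at mount time
 *
 * Sizes and allocates the slot bitmap from the quota change file, then
 * scans every slot; any entry with a nonzero change (left unsynced by a
 * previous mount of this node's journal) gets an in-core
 * gfs2_quota_data so it will be synced by gfs2_quotad.
 */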
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					  sizeof(struct gfs2_meta_header) +
					  y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp)
{
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		} else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync_timeo, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}
	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if (type == USRQUOTA)
		type = QUOTA_USER;
	else if (type == GRPQUOTA)
		type = QUOTA_GROUP;
	else
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = id;
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

/* GFS2 only supports a subset of the XFS fields */
#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)

static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	switch (type) {
	case USRQUOTA:
		type = QUOTA_USER;
		if (fdq->d_flags != FS_USER_QUOTA)
			return -EINVAL;
		break;
	case GRPQUOTA:
		type = QUOTA_GROUP;
		if (fdq->d_flags != FS_GROUP_QUOTA)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;
	if (fdq->d_id != id)
		return -EINVAL;

	error = qd_get(sdp, type, id, &qd);
	if (error)
		return error;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_put;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip, blocks);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_put:
	mutex_unlock(&ip->i_inode.i_mutex);
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_xstate     = gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};