// SPDX-License-Identifier: GPL-2.0-only
/*
 *	vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA

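/* Index of the reference for 'id' within a tree block at the given depth */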
static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
	unsigned int epb = info->dqi_usable_bs >> 2;

	depth = info->dqi_qtree_depth - depth - 1;
	while (depth--)
		id /= epb;
	return id % epb;
}

static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
{
	qid_t id = from_kqid(&init_user_ns, qid);

	return __get_index(info, id, depth);
}

/* Number of entries in one block */
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
	       / info->dqi_entry_size;
}

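/* Read one tree block from the quota file into buf */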
static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;

	memset(buf, 0, info->dqi_usable_bs);
	return sb->s_op->quota_read(sb, info->dqi_type, buf,
		info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}

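/* Write one tree block to the quota file; short writes are reported as -EIO */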
static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;
	ssize_t ret;

	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
		info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
	if (ret != info->dqi_usable_bs) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -EIO;
	}
	return ret;
}

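/* Check that a value read from the quota file lies within the expected range */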
static inline int do_check_range(struct super_block *sb, const char *val_name,
				 uint val, uint min_val, uint max_val)
{
	if (val < min_val || val > max_val) {
		quota_error(sb, "Getting %s %u out of range %u-%u",
			    val_name, val, min_val, max_val);
		return -EUCLEAN;
	}

	return 0;
}

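/* Sanity check the free-list pointers and entry count of a data block header */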
static int check_dquot_block_header(struct qtree_mem_dqinfo *info,
				    struct qt_disk_dqdbheader *dh)
{
	int err = 0;

	err = do_check_range(info->dqi_sb, "dqdh_next_free",
			     le32_to_cpu(dh->dqdh_next_free), 0,
			     info->dqi_blocks - 1);
	if (err)
		return err;
	err = do_check_range(info->dqi_sb, "dqdh_prev_free",
			     le32_to_cpu(dh->dqdh_prev_free), 0,
			     info->dqi_blocks - 1);
	if (err)
		return err;
	err = do_check_range(info->dqi_sb, "dqdh_entries",
			     le16_to_cpu(dh->dqdh_entries), 0,
			     qtree_dqstr_in_blk(info));

	return err;
}

/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		ret = check_dquot_block_header(info, dh);
		if (ret)
			goto out_buf;
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	} else {
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}

/* Insert empty block to the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
	dh->dqdh_prev_free = cpu_to_le32(0);
	dh->dqdh_entries = cpu_to_le16(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		return err;
	info->dqi_free_blk = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
}

/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether write succeeds, block is out of list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			    "with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

/* Insert given block to the beginning of list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
	int i;

	for (i = 0; i < info->dqi_entry_size; i++)
		if (disk[i])
			return 0;
	return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);

/* Find space for dquot */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
		*err = check_dquot_block_header(info, dh);
		if (*err)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}

/* Insert reference to structure into the trie */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	ret = do_check_range(dquot->dq_sb, "block", newblk, 0,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;
	if (!newblk)
		newson = 1;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth + 1);
	}
	if (newson && ret >= 0) {
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot)
{
	int tmp = QT_TREEOFF;

#ifdef __QUOTA_QT_PARANOIA
	if (info->dqi_blocks <= QT_TREEOFF) {
		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
		return -EIO;
	}
#endif
	return do_insert_tree(info, dquot, &tmp, 0);
}

/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);

	if (!ddquot)
		return -ENOMEM;

	/* dq_off is guarded by dqio_sem */
	if (!dquot->dq_off) {
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);

	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);

/* Free dquot entry in data block */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			    "other block (%u) than it should (%u)", blk,
			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		ret = -EIO;
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	ret = check_dquot_block_header(info, dh);
	if (ret)
		goto out_buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* Insert will write block itself */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
					    "data block (%u) to free entry list",
					    blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	dquot->dq_off = 0;	/* Quota is now unattached */
out_buf:
	kfree(buf);
	return ret;
}

/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	ret = do_check_range(dquot->dq_sb, "block", newblk, QT_TREEOFF,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;

	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;
	} else {
		ret = remove_tree(info, dquot, &newblk, depth + 1);
	}
	if (ret >= 0 && !newblk) {
		int i;

		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	uint tmp = QT_TREEOFF;

	if (!dquot->dq_off)	/* Not even allocated? */
		return 0;
	return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);

/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		ret = ((loff_t)blk << info->dqi_blocksize_bits) +
		      sizeof(struct qt_disk_dqdbheader) +
		      i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blk);
		goto out_buf;
	}
	ret = 0;
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* No reference? */
		goto out_buf;
	ret = do_check_range(dquot->dq_sb, "block", blk, QT_TREEOFF,
			     info->dqi_blocks - 1);
	if (ret)
		goto out_buf;

	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth + 1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
				  struct dquot *dquot)
{
	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}

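/* Read dquot from the quota file, looking up its offset in the tree if needed */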
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
			if (offset < 0)
				quota_error(sb, "Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);

/* Check whether dquot can be deleted when it is released. We know we are the
 * only one operating on the dquot (thanks to dq_lock). */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
		return qtree_delete_dquot(info, dquot);
	return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);

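/* Find the next id with an allocated tree entry, starting the search at *id */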
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;
	unsigned int level_inc = 1;
	int i;

	if (!buf)
		return -ENOMEM;

	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;

	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		uint blk_no = le32_to_cpu(ref[i]);

		if (blk_no == 0) {
			*id += level_inc;
			continue;
		}
		ret = do_check_range(info->dqi_sb, "block", blk_no, 0,
				     info->dqi_blocks - 1);
		if (ret)
			goto out_buf;
		if (depth == info->dqi_qtree_depth - 1) {
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, blk_no, depth + 1);
		if (ret != -ENOENT)
			break;
	}
	if (i == epb) {
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}

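/* Set *qid to the next id, at or after *qid, that is present in the quota tree */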
int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
{
	qid_t id = from_kqid(&init_user_ns, *qid);
	int ret;

	ret = find_next_id(info, &id, QT_TREEOFF, 0);
	if (ret < 0)
		return ret;
	*qid = make_kqid(&init_user_ns, qid->type, id);
	return 0;
}
EXPORT_SYMBOL(qtree_get_next_id);