/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/swap.h>
#include <linux/swapctl.h>
#include <linux/prefetch.h>
#include <linux/locks.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS      i_hash_shift
#define I_HASHMASK      i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0, no pages in the pagecache
 *  "unused_pagecache" - valid inode, i_count = 0, data in the pagecache
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static LIST_HEAD(inode_unused_pagecache);
static struct list_head *inode_hashtable;
static LIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
static spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static kmem_cache_t * inode_cachep;

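/*
 * Allocate a new inode for @sb: use the superblock's ->alloc_inode()
 * method if one is provided, otherwise take a struct inode from the
 * generic slab cache. Returns the partially initialised inode, or
 * NULL if the allocation failed.
 */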
static struct inode *alloc_inode(struct super_block *sb)
{
        static struct address_space_operations empty_aops;
        static struct inode_operations empty_iops;
        static struct file_operations empty_fops;
        struct inode *inode;

        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else {
                inode = (struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL);
                /* will die */
                if (inode)
                        memset(&inode->u, 0, sizeof(inode->u));
        }

        if (inode) {
                struct address_space * const mapping = &inode->i_data;

                inode->i_sb = sb;
                inode->i_dev = sb->s_dev;
                inode->i_blkbits = sb->s_blocksize_bits;
                inode->i_flags = 0;
                atomic_set(&inode->i_count, 1);
                inode->i_sock = 0;
                inode->i_op = &empty_iops;
                inode->i_fop = &empty_fops;
                inode->i_nlink = 1;
                atomic_set(&inode->i_writecount, 0);
                inode->i_size = 0;
                inode->i_blocks = 0;
                inode->i_bytes = 0;
                inode->i_generation = 0;
                memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
                inode->i_pipe = NULL;
                inode->i_bdev = NULL;
                inode->i_cdev = NULL;

                mapping->a_ops = &empty_aops;
                mapping->host = inode;
                mapping->gfp_mask = GFP_HIGHUSER;
                inode->i_mapping = mapping;
        }
        return inode;
}

static void destroy_inode(struct inode *inode)
{
        if (inode_has_buffers(inode))
                BUG();
        /* Reinitialise the waitqueue head because __wait_on_freeing_inode()
           may have left stale entries on it which it can't remove (since
           it knows we're freeing the inode right now). */
        init_waitqueue_head(&inode->i_wait);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
        else
                kmem_cache_free(inode_cachep, inode);
}


/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache know about that.
 */
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        __inode_init_once(inode);
}

void __inode_init_once(struct inode *inode)
{
        init_waitqueue_head(&inode->i_wait);
        INIT_LIST_HEAD(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_data.clean_pages);
        INIT_LIST_HEAD(&inode->i_data.dirty_pages);
        INIT_LIST_HEAD(&inode->i_data.locked_pages);
        INIT_LIST_HEAD(&inode->i_dentry);
        INIT_LIST_HEAD(&inode->i_dirty_buffers);
        INIT_LIST_HEAD(&inode->i_dirty_data_buffers);
        INIT_LIST_HEAD(&inode->i_devices);
        sema_init(&inode->i_sem, 1);
        sema_init(&inode->i_zombie, 1);
        init_rwsem(&inode->i_alloc_sem);
        spin_lock_init(&inode->i_data.i_shared_lock);
}

static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
        struct inode * inode = (struct inode *) foo;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR)
                inode_init_once(inode);
}

/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 */

void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block * sb = inode->i_sb;

        if (!sb)
                return;

        /* Don't do this for I_DIRTY_PAGES - that doesn't actually dirty the inode itself */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                if (sb->s_op && sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode);
        }

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

        spin_lock(&inode_lock);
        if ((inode->i_state & flags) != flags) {
                inode->i_state |= flags;
                /* Only add valid (i.e. hashed) inodes to the dirty list */
                if (!(inode->i_state & (I_LOCK|I_FREEING|I_CLEAR)) &&
                    !list_empty(&inode->i_hash)) {
                        list_del(&inode->i_list);
                        list_add(&inode->i_list, &sb->s_dirty);
                }
        }
        spin_unlock(&inode_lock);
}

static void __wait_on_inode(struct inode * inode)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&inode->i_wait, &wait);
repeat:
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (inode->i_state & I_LOCK) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&inode->i_wait, &wait);
        current->state = TASK_RUNNING;
}

static inline void wait_on_inode(struct inode *inode)
{
        if (inode->i_state & I_LOCK)
                __wait_on_inode(inode);
}

/*
 * If we try to find an inode in the inode hash while it is being deleted, we
 * have to wait until the filesystem completes its deletion before reporting
 * that it isn't found. This is because iget will immediately call
 * ->read_inode, and we want to be sure that evidence of the deletion is found
 * by ->read_inode.
 *
 * Unlike the 2.6 version, this call cannot return early, since inodes
 * do not share a wait queue. Therefore, we don't call remove_wait_queue(); it
 * would be dangerous to do so since the inode may have already been freed,
 * and it's unnecessary, since the inode is definitely going to get freed.
 *
 * This is called with inode_lock held.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&inode->i_wait, &wait);
        set_current_state(TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode_lock);
        schedule();

        spin_lock(&inode_lock);
}

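/*
 * Push a single inode out through the superblock's ->write_inode()
 * method, if the filesystem provides one; bad inodes are never
 * written back.
 */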
static inline void write_inode(struct inode *inode, int sync)
{
        if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                inode->i_sb->s_op->write_inode(inode, sync);
}

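/*
 * Grab a reference to an inode. If this takes i_count from zero to
 * one, the inode leaves the unused accounting and (unless it is dirty
 * or locked) is moved back onto the inode_in_use list. Must be called
 * with inode_lock held.
 */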
static inline void __iget(struct inode * inode)
{
        if (atomic_read(&inode->i_count)) {
                atomic_inc(&inode->i_count);
                return;
        }
        atomic_inc(&inode->i_count);
        if (!(inode->i_state & (I_DIRTY|I_LOCK))) {
                list_del(&inode->i_list);
                list_add(&inode->i_list, &inode_in_use);
        }
        inodes_stat.nr_unused--;
}

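/*
 * Move an inode to the "type" list matching its current state: the
 * superblock's dirty list, inode_in_use, inode_unused_pagecache or
 * inode_unused. Unhashed inodes and inodes being freed are left
 * alone. Must be called with inode_lock held.
 */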
static inline void __refile_inode(struct inode *inode)
{
        struct list_head *to;

        if (inode->i_state & (I_FREEING|I_CLEAR))
                return;
        if (list_empty(&inode->i_hash))
                return;

        if (inode->i_state & I_DIRTY)
                to = &inode->i_sb->s_dirty;
        else if (atomic_read(&inode->i_count))
                to = &inode_in_use;
        else if (inode->i_data.nrpages)
                to = &inode_unused_pagecache;
        else
                to = &inode_unused;
        list_del(&inode->i_list);
        list_add(&inode->i_list, to);
}

void refile_inode(struct inode *inode)
{
        if (!inode)
                return;
        spin_lock(&inode_lock);
        if (!(inode->i_state & I_LOCK))
                __refile_inode(inode);
        spin_unlock(&inode_lock);
}

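/*
 * Write a single inode out to disk. Called with inode_lock held; the
 * lock is dropped around the actual I/O, during which the inode sits
 * on the superblock's s_locked_inodes list with I_LOCK set so that
 * nobody else touches it.
 */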
static inline void __sync_one(struct inode *inode, int sync)
{
        unsigned dirty;

        list_del(&inode->i_list);
        list_add(&inode->i_list, &inode->i_sb->s_locked_inodes);

        if (inode->i_state & (I_LOCK|I_FREEING))
                BUG();

        /* Set I_LOCK, reset I_DIRTY */
        dirty = inode->i_state & I_DIRTY;
        inode->i_state |= I_LOCK;
        inode->i_state &= ~I_DIRTY;
        spin_unlock(&inode_lock);

        filemap_fdatasync(inode->i_mapping);

        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC))
                write_inode(inode, sync);

        filemap_fdatawait(inode->i_mapping);

        spin_lock(&inode_lock);
        inode->i_state &= ~I_LOCK;
        __refile_inode(inode);
        wake_up(&inode->i_wait);
}

static inline void sync_one(struct inode *inode, int sync)
{
        while (inode->i_state & I_LOCK) {
                __iget(inode);
                spin_unlock(&inode_lock);
                __wait_on_inode(inode);
                iput(inode);
                spin_lock(&inode_lock);
        }

        __sync_one(inode, sync);
}

static inline void sync_list(struct list_head *head)
{
        struct list_head * tmp;

        while ((tmp = head->prev) != head)
                __sync_one(list_entry(tmp, struct inode, i_list), 0);
}

static inline void wait_on_locked(struct list_head *head)
{
        struct list_head * tmp;
        while ((tmp = head->prev) != head) {
                struct inode *inode = list_entry(tmp, struct inode, i_list);
                __iget(inode);
                spin_unlock(&inode_lock);
                __wait_on_inode(inode);
                iput(inode);
                spin_lock(&inode_lock);
        }
}

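/*
 * Walk a dirty list backwards, writing out inodes that have no users
 * left. Returns how many of the requested nr_inodes remain to be
 * synced. Called with inode_lock held.
 */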
static inline int try_to_sync_unused_list(struct list_head *head, int nr_inodes)
{
        struct list_head *tmp = head;
        struct inode *inode;

        while (nr_inodes && (tmp = tmp->prev) != head) {
                inode = list_entry(tmp, struct inode, i_list);

                if (!atomic_read(&inode->i_count)) {
                        __sync_one(inode, 0);
                        nr_inodes--;

                        /*
                         * __sync_one moved the inode to another list,
                         * so we have to start looking from the list head.
                         */
                        tmp = head;
                }
        }

        return nr_inodes;
}

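/*
 * Write out all dirty inodes of one superblock and wait for any that
 * are locked, looping until both lists are seen empty.
 */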
void sync_inodes_sb(struct super_block *sb)
{
        spin_lock(&inode_lock);
        while (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_locked_inodes)) {
                sync_list(&sb->s_dirty);
                wait_on_locked(&sb->s_locked_inodes);
        }
        spin_unlock(&inode_lock);
}

/*
 * Note:
 * We don't need to grab a reference to the superblock here. If it has a
 * non-empty ->s_dirty it hasn't been killed yet, and kill_super() won't
 * proceed past sync_inodes_sb() until both ->s_dirty and ->s_locked_inodes
 * are empty. Since __sync_one() regains inode_lock before it finally moves
 * the inode off the superblock lists, we are OK.
 */

void sync_unlocked_inodes(void)
{
        struct super_block * sb;
        spin_lock(&inode_lock);
        spin_lock(&sb_lock);
        sb = sb_entry(super_blocks.next);
        for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
                if (!list_empty(&sb->s_dirty)) {
                        spin_unlock(&sb_lock);
                        sync_list(&sb->s_dirty);
                        spin_lock(&sb_lock);
                }
        }
        spin_unlock(&sb_lock);
        spin_unlock(&inode_lock);
}

/*
 * Find a superblock with inodes that need to be synced
 */

static struct super_block *get_super_to_sync(void)
{
        struct list_head *p;
restart:
        spin_lock(&inode_lock);
        spin_lock(&sb_lock);
        list_for_each(p, &super_blocks) {
                struct super_block *s = list_entry(p,struct super_block,s_list);
                if (list_empty(&s->s_dirty) && list_empty(&s->s_locked_inodes))
                        continue;
                s->s_count++;
                spin_unlock(&sb_lock);
                spin_unlock(&inode_lock);
                down_read(&s->s_umount);
                if (!s->s_root) {
                        drop_super(s);
                        goto restart;
                }
                return s;
        }
        spin_unlock(&sb_lock);
        spin_unlock(&inode_lock);
        return NULL;
}

/**
 * sync_inodes - write the dirty inodes for a device out to disk
 * @dev: device to sync the inodes from, or zero to sync all devices.
 *
 * sync_inodes goes through the super block's dirty list,
 * writes them out, and puts them back on the normal list.
 */

void sync_inodes(kdev_t dev)
{
        struct super_block * s;

        /*
         * Search the super_blocks list for the device(s) to sync.
         */
        if (dev) {
                if ((s = get_super(dev)) != NULL) {
                        sync_inodes_sb(s);
                        drop_super(s);
                }
        } else {
                while ((s = get_super_to_sync()) != NULL) {
                        sync_inodes_sb(s);
                        drop_super(s);
                }
        }
}

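/*
 * Task-queue callback (see unused_inodes_flush_task below): write out
 * unused dirty inodes on every superblock so that prune_icache() can
 * free them on a later pass.
 */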
static void try_to_sync_unused_inodes(void * arg)
{
        struct super_block * sb;
        int nr_inodes = inodes_stat.nr_unused;

        spin_lock(&inode_lock);
        spin_lock(&sb_lock);
        sb = sb_entry(super_blocks.next);
        for (; nr_inodes && sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
                if (list_empty(&sb->s_dirty))
                        continue;
                spin_unlock(&sb_lock);
                nr_inodes = try_to_sync_unused_list(&sb->s_dirty, nr_inodes);
                spin_lock(&sb_lock);
        }
        spin_unlock(&sb_lock);
        spin_unlock(&inode_lock);
}

static struct tq_struct unused_inodes_flush_task;

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is
 * dirty. This is primarily needed by knfsd.
 */

void write_inode_now(struct inode *inode, int sync)
{
        struct super_block * sb = inode->i_sb;

        if (sb) {
                spin_lock(&inode_lock);
                while (inode->i_state & I_DIRTY)
                        sync_one(inode, sync);
                spin_unlock(&inode_lock);
                if (sync)
                        wait_on_inode(inode);
        }
        else
                printk(KERN_ERR "write_inode_now: no super block\n");
}

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @what: bitmask of OSYNC_DATA, OSYNC_METADATA and OSYNC_INODE
 *        selecting what to flush; without OSYNC_INODE, timestamp-only
 *        changes are not forced out
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 */

int generic_osync_inode(struct inode *inode, int what)
{
        int err = 0, err2 = 0, need_write_inode_now = 0;

        /*
         * WARNING
         *
         * Currently, the filesystem write path does not pass the
         * filp down to the low-level write functions. Therefore it
         * is impossible for (say) __block_commit_write to know if
         * the operation is O_SYNC or not.
         *
         * Ideally, O_SYNC writes would have the filesystem call
         * ll_rw_block as it went to kick-start the writes, and we
         * could call osync_inode_buffers() here to wait only for
         * those IOs which have already been submitted to the device
         * driver layer. As it stands, if we did this we'd not write
         * anything to disk since our writes have not been queued by
         * this point: they are still on the dirty LRU.
         *
         * So, currently we will call fsync_inode_buffers() instead,
         * to flush _all_ dirty buffers for this inode to disk on
         * every O_SYNC write, not just the synchronous I/Os. --sct
         */

        if (what & OSYNC_METADATA)
                err = fsync_inode_buffers(inode);
        if (what & OSYNC_DATA)
                err2 = fsync_inode_data_buffers(inode);
        if (!err)
                err = err2;

        spin_lock(&inode_lock);
        if ((inode->i_state & I_DIRTY) &&
            ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
                need_write_inode_now = 1;
        spin_unlock(&inode_lock);

        if (need_write_inode_now)
                write_inode_now(inode, 1);
        else
                wait_on_inode(inode);

        return err;
}

/**
 * clear_inode - clear an inode
 * @inode: inode to clear
 *
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */

void clear_inode(struct inode *inode)
{
        invalidate_inode_buffers(inode);

        if (inode->i_data.nrpages)
                BUG();
        if (!(inode->i_state & I_FREEING))
                BUG();
        if (inode->i_state & I_CLEAR)
                BUG();
        wait_on_inode(inode);
        DQUOT_DROP(inode);
        if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
                inode->i_sb->s_op->clear_inode(inode);
        if (inode->i_bdev)
                bd_forget(inode);
        else if (inode->i_cdev) {
                cdput(inode->i_cdev);
                inode->i_cdev = NULL;
        }
        spin_lock(&inode_lock);
        inode->i_state = I_CLEAR;
        spin_unlock(&inode_lock);
}

/*
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
        int nr_disposed = 0;

        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_entry(head->next, struct inode, i_list);
                list_del(&inode->i_list);

                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                clear_inode(inode);
                spin_lock(&inode_lock);
                list_del(&inode->i_hash);
                INIT_LIST_HEAD(&inode->i_hash);
                spin_unlock(&inode_lock);
                wake_up(&inode->i_wait);
                destroy_inode(inode);
                nr_disposed++;
        }
        spin_lock(&inode_lock);
        inodes_stat.nr_inodes -= nr_disposed;
        spin_unlock(&inode_lock);
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
        struct list_head *next;
        int busy = 0, count = 0;

        next = head->next;
        for (;;) {
                struct list_head * tmp = next;
                struct inode * inode;

                next = next->next;
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_list);
                if (inode->i_sb != sb)
                        continue;
                invalidate_inode_buffers(inode);
                if (!atomic_read(&inode->i_count)) {
                        list_del_init(&inode->i_hash);
                        list_del(&inode->i_list);
                        list_add(&inode->i_list, dispose);
                        inode->i_state |= I_FREEING;
                        count++;
                        continue;
                }
                busy = 1;
        }
        /* only unused inodes may be cached with i_count zero */
        inodes_stat.nr_unused -= count;
        return busy;
}

/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */

/**
 * invalidate_inodes - discard the inodes on a device
 * @sb: superblock
 *
 * Discard all of the inodes for a given superblock. If the discard
 * fails because there are busy inodes then a non zero value is returned.
 * If the discard is successful all the inodes have been discarded.
 */

int invalidate_inodes(struct super_block * sb)
{
        int busy;
        LIST_HEAD(throw_away);

        spin_lock(&inode_lock);
        busy = invalidate_list(&inode_in_use, sb, &throw_away);
        busy |= invalidate_list(&inode_unused, sb, &throw_away);
        busy |= invalidate_list(&inode_unused_pagecache, sb, &throw_away);
        busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
        busy |= invalidate_list(&sb->s_locked_inodes, sb, &throw_away);
        spin_unlock(&inode_lock);

        dispose_list(&throw_away);

        return busy;
}

int invalidate_device(kdev_t dev, int do_sync)
{
        struct super_block *sb;
        int res;

        if (do_sync)
                fsync_dev(dev);

        res = 0;
        sb = get_super(dev);
        if (sb) {
                /*
                 * no need to lock the super, get_super holds the
                 * read semaphore so the filesystem cannot go away
                 * under us (->put_super runs with the write lock
                 * held).
                 */
                shrink_dcache_sb(sb);
                res = invalidate_inodes(sb);
                drop_super(sb);
        }
        invalidate_buffers(dev);
        return res;
}


/*
 * This is called with the inode lock held. It searches
 * the unused list for freeable inodes, which are moved to a
 * temporary list and then destroyed by dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * We leave the inode in the inode hash table until *after*
 * the filesystem's ->delete_inode (in dispose_list) completes.
 * This ensures that an iget (such as nfsd might instigate) will
 * always find up-to-date information either in the hash or on disk.
 *
 * I_FREEING is set so that no-one will take a new reference
 * to the inode while it is being deleted.
 *
 * N.B. The spinlock is released during the call to
 * dispose_list.
 */
#define CAN_UNUSE(inode) \
        ((((inode)->i_state | (inode)->i_data.nrpages) == 0) && \
         !inode_has_buffers(inode))
#define INODE(entry)    (list_entry(entry, struct inode, i_list))

void prune_icache(int goal)
{
        LIST_HEAD(list);
        struct list_head *entry, *freeable = &list;
        int count;
#ifdef CONFIG_HIGHMEM
        int avg_pages;
#endif
        struct inode * inode;

        spin_lock(&inode_lock);

        count = 0;
        entry = inode_unused.prev;
        while (entry != &inode_unused)
        {
                struct list_head *tmp = entry;

                entry = entry->prev;
                inode = INODE(tmp);
                if (inode->i_state & (I_FREEING|I_CLEAR|I_LOCK))
                        continue;
                if (!CAN_UNUSE(inode))
                        continue;
                if (atomic_read(&inode->i_count))
                        continue;
                list_del(tmp);
                list_add(tmp, freeable);
                inode->i_state |= I_FREEING;
                count++;
                if (--goal <= 0)
                        break;
        }
        inodes_stat.nr_unused -= count;
        spin_unlock(&inode_lock);

        dispose_list(freeable);

        /*
         * If we didn't free enough clean inodes, schedule
         * a sync of the dirty inodes; we cannot do it
         * from here or we'd either be synchronously dog-slow
         * or deadlock with the OOM handling.
         */
        if (goal > 0)
                schedule_task(&unused_inodes_flush_task);

#ifdef CONFIG_HIGHMEM
        /*
         * On highmem machines it is possible to have low memory
         * filled with inodes that cannot be reclaimed because they
         * have page cache pages in highmem attached to them.
         * This could deadlock the system if the memory used by
         * inodes is significant compared to the amount of freeable
         * low memory. In that case we forcefully remove the page
         * cache pages from the inodes we want to reclaim.
         *
         * Note that this loop doesn't actually reclaim the inodes;
         * once the last pagecache page belonging to an inode is
         * gone it will be placed on the inode_unused list and the
         * loop above will prune it the next time prune_icache() is
         * called.
         */
        if (goal <= 0)
                return;
        if (inodes_stat.nr_unused <
            (freeable_lowmem() * PAGE_SIZE) / (sizeof(struct inode) * 10))
                return;

        wakeup_bdflush();

        avg_pages = page_cache_size;
        avg_pages -= atomic_read(&buffermem_pages) + swapper_space.nrpages;
        avg_pages = avg_pages / (inodes_stat.nr_inodes + 1);
        spin_lock(&inode_lock);
        while (goal-- > 0) {
                if (list_empty(&inode_unused_pagecache))
                        break;
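                /* Rotate the tail of the list to the head, so each
                   pass of the loop examines a different inode even
                   when we skip one with "continue" below. */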
                entry = inode_unused_pagecache.prev;
                list_del(entry);
                list_add(entry, &inode_unused_pagecache);

                inode = INODE(entry);
                /* Don't nuke inodes with lots of page cache attached. */
                if (inode->i_mapping->nrpages > 5 * avg_pages)
                        continue;
                /* Because of locking we grab the inode and unlock the list. */
                if (inode->i_state & I_LOCK)
                        continue;
                inode->i_state |= I_LOCK;
                spin_unlock(&inode_lock);

                /*
                 * If the inode has clean pages only, we can free all its
                 * pagecache memory; the inode will automagically be refiled
                 * onto the unused list. The wakeup_bdflush above makes
                 * sure that all inodes become clean eventually.
                 */
                if (list_empty(&inode->i_mapping->dirty_pages) &&
                    !inode_has_buffers(inode))
                        invalidate_inode_pages(inode);

                /* Release the inode again. */
                spin_lock(&inode_lock);
                inode->i_state &= ~I_LOCK;
                wake_up(&inode->i_wait);
        }
        spin_unlock(&inode_lock);
#endif /* CONFIG_HIGHMEM */
}

int shrink_icache_memory(int priority, int gfp_mask)
{
        int count = 0;

        /*
         * Nasty deadlock avoidance..
         *
         * We may hold various FS locks, and we don't
         * want to recurse into the FS that called us
         * in clear_inode() and friends..
         */
        if (!(gfp_mask & __GFP_FS))
                return 0;

        count = inodes_stat.nr_unused / priority;

        prune_icache(count);
        return kmem_cache_shrink(inode_cachep);
}

/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
        struct list_head *tmp;
        struct inode * inode;

repeat:
        tmp = head;
        for (;;) {
                tmp = tmp->next;
                inode = NULL;
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_hash);
                if (inode->i_ino != ino)
                        continue;
                if (inode->i_sb != sb)
                        continue;
                if (find_actor && !find_actor(inode, ino, opaque))
                        continue;
                if (inode->i_state & (I_FREEING|I_CLEAR)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                break;
        }
        return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 */

struct inode * new_inode(struct super_block *sb)
{
        static unsigned long last_ino;
        struct inode * inode;

        spin_lock_prefetch(&inode_lock);

        inode = alloc_inode(sb);
        if (inode) {
                spin_lock(&inode_lock);
                inodes_stat.nr_inodes++;
                list_add(&inode->i_list, &inode_in_use);
                inode->i_ino = ++last_ino;
                inode->i_state = 0;
                spin_unlock(&inode_lock);
        }
        return inode;
}

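/*
 * Called once the inode returned by get_new_inode() has been fully
 * initialised: clears I_LOCK and I_NEW and wakes up anybody sleeping
 * in wait_on_inode().
 */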
void unlock_new_inode(struct inode *inode)
{
        /*
         * This is special! We do not need the spinlock
         * when clearing I_LOCK, because we're guaranteed
         * that nobody else tries to do anything about the
         * state of the inode when it is locked, as we
         * just created it (so there can be no old holders
         * that haven't tested I_LOCK).
         */
        inode->i_state &= ~(I_LOCK|I_NEW);
        wake_up(&inode->i_wait);
}

/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *      -- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
        struct inode * inode;

        inode = alloc_inode(sb);
        if (inode) {
                struct inode * old;

                spin_lock(&inode_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, ino, head, find_actor, opaque);
                if (!old) {
                        inodes_stat.nr_inodes++;
                        list_add(&inode->i_list, &inode_in_use);
                        list_add(&inode->i_hash, head);
                        inode->i_ino = ino;
                        inode->i_state = I_LOCK|I_NEW;
                        spin_unlock(&inode_lock);

                        /*
                         * Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                __iget(old);
                spin_unlock(&inode_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;
}

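/*
 * Hash the (superblock, inode number) pair down to an index into
 * inode_hashtable. The superblock pointer is divided by the L1 cache
 * line size first, since its low bits are all alignment and carry no
 * information.
 */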
static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
        unsigned long tmp = i_ino + ((unsigned long) sb / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> I_HASHBITS);
        return tmp & I_HASHMASK;
}

/* Yeah, I know about quadratic hash. Maybe, later. */

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */

ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        static ino_t counter = 0;
        struct inode *inode;
        struct list_head * head;
        ino_t res;
        spin_lock(&inode_lock);
retry:
        if (counter > max_reserved) {
                head = inode_hashtable + hash(sb,counter);
                inode = find_inode(sb, res = counter++, head, NULL, NULL);
                if (!inode) {
                        spin_unlock(&inode_lock);
                        return res;
                }
        } else {
                counter = max_reserved + 1;
        }
        goto retry;
}

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * If the inode is in the cache, the inode is returned with an
 * incremented reference count.
 *
 * Otherwise, %NULL is returned.
 *
 * This is almost certainly not the function you are looking for.
 * If you think you need to use this, consult an expert first.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct list_head * head = inode_hashtable + hash(sb,ino);
        struct inode * inode;

        spin_lock(&inode_lock);
        inode = find_inode(sb, ino, head, NULL, NULL);
        if (inode) {
                __iget(inode);
                spin_unlock(&inode_lock);
                wait_on_inode(inode);
                return inode;
        }
        spin_unlock(&inode_lock);

        return inode;
}

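/*
 * Grab an extra reference to an inode the caller already holds a
 * pointer to. Returns the inode with i_count incremented, or NULL if
 * the inode is already on its way to being freed.
 */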
struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode_lock);
        if (!(inode->i_state & I_FREEING))
                __iget(inode);
        else
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        spin_unlock(&inode_lock);
        return inode;
}

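/*
 * Look an inode up in the hash. If it is found, grab a reference and
 * wait for any pending I/O on it; otherwise allocate a fresh inode,
 * which is returned locked with I_NEW set for the caller to fill in.
 */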
struct inode *iget4_locked(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque)
{
        struct list_head * head = inode_hashtable + hash(sb,ino);
        struct inode * inode;

        spin_lock(&inode_lock);
        inode = find_inode(sb, ino, head, find_actor, opaque);
        if (inode) {
                __iget(inode);
                spin_unlock(&inode_lock);
                wait_on_inode(inode);
                return inode;
        }
        spin_unlock(&inode_lock);

        /*
         * get_new_inode() will do the right thing, re-trying the search
         * in case it had to block at any point.
         */
        return get_new_inode(sb, ino, head, find_actor, opaque);
}

/**
 * insert_inode_hash - hash an inode
 * @inode: unhashed inode
 *
 * Add an inode to the inode hash for this superblock. If the inode
 * has no superblock it is added to a separate anonymous chain.
 */

void insert_inode_hash(struct inode *inode)
{
        struct list_head *head = &anon_hash_chain;
        if (inode->i_sb)
                head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
        spin_lock(&inode_lock);
        list_add(&inode->i_hash, head);
        spin_unlock(&inode_lock);
}

/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock or anonymous hash.
 */

void remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_lock);
        list_del(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_hash);
        spin_unlock(&inode_lock);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero the inode is also then freed and may be destroyed.
 */

void iput(struct inode *inode)
{
        if (inode) {
                struct super_block *sb = inode->i_sb;
                struct super_operations *op = NULL;

                if (inode->i_state == I_CLEAR)
                        BUG();

                if (sb && sb->s_op)
                        op = sb->s_op;
                if (op && op->put_inode)
                        op->put_inode(inode);

                if (!atomic_dec_and_lock(&inode->i_count, &inode_lock))
                        return;

                if (!inode->i_nlink) {
                        list_del(&inode->i_list);
                        INIT_LIST_HEAD(&inode->i_list);
                        inode->i_state |= I_FREEING;
                        inodes_stat.nr_inodes--;
                        spin_unlock(&inode_lock);

                        if (inode->i_data.nrpages)
                                truncate_inode_pages(&inode->i_data, 0);

                        if (op && op->delete_inode) {
                                void (*delete)(struct inode *) = op->delete_inode;
                                if (!is_bad_inode(inode))
                                        DQUOT_INIT(inode);
                                /* s_op->delete_inode internally calls clear_inode() */
                                delete(inode);
                        } else
                                clear_inode(inode);
                        spin_lock(&inode_lock);
                        list_del(&inode->i_hash);
                        INIT_LIST_HEAD(&inode->i_hash);
                        spin_unlock(&inode_lock);
                        wake_up(&inode->i_wait);
                        if (inode->i_state != I_CLEAR)
                                BUG();
                } else {
                        if (!list_empty(&inode->i_hash)) {
                                if (!(inode->i_state & (I_DIRTY|I_LOCK)))
                                        __refile_inode(inode);
                                inodes_stat.nr_unused++;
                                spin_unlock(&inode_lock);
                                if (!sb || (sb->s_flags & MS_ACTIVE))
                                        return;
                                write_inode_now(inode, 1);
                                spin_lock(&inode_lock);
                                inodes_stat.nr_unused--;
                                list_del_init(&inode->i_hash);
                        }
                        list_del_init(&inode->i_list);
                        inode->i_state |= I_FREEING;
                        inodes_stat.nr_inodes--;
                        spin_unlock(&inode_lock);
                        if (inode->i_data.nrpages)
                                truncate_inode_pages(&inode->i_data, 0);
                        clear_inode(inode);
                }
                destroy_inode(inode);
        }
}

void force_delete(struct inode *inode)
{
        /*
         * Kill off unused inodes ... iput() will unhash and
         * delete the inode if we set i_nlink to zero.
         */
        if (atomic_read(&inode->i_count) == 1)
                inode->i_nlink = 0;
}

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1, the function will return
 * the disk block, relative to the start of the disk, that holds
 * that block of the file.
 */

int bmap(struct inode * inode, int block)
{
        int res = 0;
        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}

/*
 * Initialize the hash tables.
 */
void __init inode_init(unsigned long mempages)
{
        struct list_head *head;
        unsigned long order;
        unsigned int nr_hash;
        int i;

        mempages >>= (14 - PAGE_SHIFT);
        mempages *= sizeof(struct list_head);
        for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
                ;

        do {
                unsigned long tmp;

                nr_hash = (1UL << order) * PAGE_SIZE /
                        sizeof(struct list_head);
                i_hash_mask = (nr_hash - 1);

                tmp = nr_hash;
                i_hash_shift = 0;
                while ((tmp >>= 1UL) != 0UL)
                        i_hash_shift++;

                inode_hashtable = (struct list_head *)
                        __get_free_pages(GFP_ATOMIC, order);
        } while (inode_hashtable == NULL && --order >= 0);

        printk(KERN_INFO "Inode cache hash table entries: %d (order: %ld, %ld bytes)\n",
                        nr_hash, order, (PAGE_SIZE << order));

        if (!inode_hashtable)
                panic("Failed to allocate inode hash table\n");

        head = inode_hashtable;
        i = nr_hash;
        do {
                INIT_LIST_HEAD(head);
                head++;
                i--;
        } while (i);

        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
                                         0, SLAB_HWCACHE_ALIGN, init_once,
                                         NULL);
        if (!inode_cachep)
                panic("cannot create inode slab cache");

        unused_inodes_flush_task.routine = try_to_sync_unused_inodes;
}

/**
 * update_atime - update the access time
 * @inode: inode accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */

void update_atime (struct inode *inode)
{
        if (inode->i_atime == CURRENT_TIME)
                return;
        if (IS_NOATIME(inode))
                return;
        if (IS_NODIRATIME(inode) && S_ISDIR(inode->i_mode))
                return;
        if (IS_RDONLY(inode))
                return;
        inode->i_atime = CURRENT_TIME;
        mark_inode_dirty_sync (inode);
}

/**
 * update_mctime - update the mtime and ctime
 * @inode: inode accessed
 *
 * Update the modified and changed times on an inode for writes to special
 * files such as fifos. No change is forced if the timestamps are already
 * up-to-date or if the filesystem is readonly.
 */

void update_mctime (struct inode *inode)
{
        if (inode->i_mtime == CURRENT_TIME && inode->i_ctime == CURRENT_TIME)
                return;
        if (IS_RDONLY(inode))
                return;
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
        mark_inode_dirty (inode);
}


/*
 * Quota functions that want to walk the inode lists..
 */
#ifdef CONFIG_QUOTA

/* Functions back in dquot.c */
void put_dquot_list(struct list_head *);
int remove_inode_dquot_ref(struct inode *, short, struct list_head *);

void remove_dquot_ref(struct super_block *sb, short type)
{
        struct inode *inode;
        struct list_head *act_head;
        LIST_HEAD(tofree_head);

        if (!sb->dq_op)
                return; /* nothing to do */
        /* We have to be protected against other CPUs */
        lock_kernel();          /* This lock is for quota code */
        spin_lock(&inode_lock); /* This lock is for inodes code */

        list_for_each(act_head, &inode_in_use) {
                inode = list_entry(act_head, struct inode, i_list);
                if (inode->i_sb == sb && IS_QUOTAINIT(inode))
                        remove_inode_dquot_ref(inode, type, &tofree_head);
        }
        list_for_each(act_head, &inode_unused) {
                inode = list_entry(act_head, struct inode, i_list);
                if (inode->i_sb == sb && IS_QUOTAINIT(inode))
                        remove_inode_dquot_ref(inode, type, &tofree_head);
        }
        list_for_each(act_head, &inode_unused_pagecache) {
                inode = list_entry(act_head, struct inode, i_list);
                if (inode->i_sb == sb && IS_QUOTAINIT(inode))
                        remove_inode_dquot_ref(inode, type, &tofree_head);
        }
        list_for_each(act_head, &sb->s_dirty) {
                inode = list_entry(act_head, struct inode, i_list);
                if (IS_QUOTAINIT(inode))
                        remove_inode_dquot_ref(inode, type, &tofree_head);
        }
        list_for_each(act_head, &sb->s_locked_inodes) {
                inode = list_entry(act_head, struct inode, i_list);
                if (IS_QUOTAINIT(inode))
                        remove_inode_dquot_ref(inode, type, &tofree_head);
        }
        spin_unlock(&inode_lock);
        unlock_kernel();

        put_dquot_list(&tofree_head);
}

#endif