/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/iobuf.h>

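/*
 * Management of the global table of open files: allocation and
 * recycling of struct file objects, the anonymous and free lists,
 * and the files_stat counters exported via sysctl.
 */
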
/* sysctl tunables... */
struct files_stat_struct files_stat = {0, 0, NR_FILE};

/* Here the new files go */
static LIST_HEAD(anon_list);
/* And here the free ones sit */
static LIST_HEAD(free_list);
/* public *and* exported. Not pretty! */
spinlock_t files_lock = SPIN_LOCK_UNLOCKED;

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * SMP-safe.
 */
struct file * get_empty_filp(void)
{
	static int old_max = 0;
	struct file * f;

	file_list_lock();
	if (files_stat.nr_free_files > NR_RESERVED_FILES) {
	used_one:
		f = list_entry(free_list.next, struct file, f_list);
		list_del(&f->f_list);
		files_stat.nr_free_files--;
	new_one:
		memset(f, 0, sizeof(*f));
		atomic_set(&f->f_count, 1);
		f->f_version = ++event;
		f->f_uid = current->fsuid;
		f->f_gid = current->fsgid;
		f->f_maxcount = INT_MAX;
		list_add(&f->f_list, &anon_list);
		file_list_unlock();
		return f;
	}
	/*
	 * Use a reserved one if we're the superuser
	 */
	if (files_stat.nr_free_files && !current->euid)
		goto used_one;
	/*
	 * Allocate a new one if we're below the limit.
	 */
	if (files_stat.nr_files < files_stat.max_files) {
		file_list_unlock();
		f = kmem_cache_alloc(filp_cachep, SLAB_KERNEL);
		file_list_lock();
		if (f) {
			files_stat.nr_files++;
			goto new_one;
		}
		/* Big problems... */
		printk(KERN_WARNING "VFS: filp allocation failed\n");

	} else if (files_stat.max_files > old_max) {
		printk(KERN_INFO "VFS: file-max limit %d reached\n", files_stat.max_files);
		old_max = files_stat.max_files;
	}
	file_list_unlock();
	return NULL;
}

/*
 * Clear and initialize a (private) struct file for the given dentry,
 * and call the open function (if any). The caller must verify that
 * inode->i_fop is not NULL.
 */
int init_private_file(struct file *filp, struct dentry *dentry, int mode)
{
	memset(filp, 0, sizeof(*filp));
	filp->f_mode = mode;
	atomic_set(&filp->f_count, 1);
	filp->f_dentry = dentry;
	filp->f_uid = current->fsuid;
	filp->f_gid = current->fsgid;
	filp->f_op = dentry->d_inode->i_fop;
	filp->f_maxcount = INT_MAX;

	if (filp->f_op->open)
		return filp->f_op->open(dentry->d_inode, filp);
	else
		return 0;
}

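/*
 * Drop a reference to an open file.  When the last reference is
 * released, drop the file locks and kiobuf, call the release file
 * operation, give back write access to the inode, detach the file
 * from its dentry and vfsmount, and return it to the free list.
 */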
void fastcall fput(struct file * file)
{
	struct dentry * dentry = file->f_dentry;
	struct vfsmount * mnt = file->f_vfsmnt;
	struct inode * inode = dentry->d_inode;

	if (atomic_dec_and_test(&file->f_count)) {
		locks_remove_flock(file);

		if (file->f_iobuf)
			free_kiovec(1, &file->f_iobuf);

		if (file->f_op && file->f_op->release)
			file->f_op->release(inode, file);
		fops_put(file->f_op);
		if (file->f_mode & FMODE_WRITE)
			put_write_access(inode);
		file_list_lock();
		file->f_dentry = NULL;
		file->f_vfsmnt = NULL;
		list_del(&file->f_list);
		list_add(&file->f_list, &free_list);
		files_stat.nr_free_files++;
		file_list_unlock();
		dput(dentry);
		mntput(mnt);
	}
}

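/*
 * Look up the file for a descriptor in the current process's file
 * table and take an extra reference on it.  Returns NULL if the
 * descriptor is not open.  The reference must be dropped with fput().
 */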
struct file fastcall *fget(unsigned int fd)
{
	struct file * file;
	struct files_struct *files = current->files;

	read_lock(&files->file_lock);
	file = fcheck(fd);
	if (file)
		get_file(file);
	read_unlock(&files->file_lock);
	return file;
}

/* Here. put_filp() is SMP-safe now. */

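/*
 * Drop a reference to a struct file that was never attached to a
 * dentry or vfsmount (e.g. one obtained from get_empty_filp() that
 * could not be installed): on the final put, simply move it back
 * to the free list.
 */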
void put_filp(struct file *file)
{
	if (atomic_dec_and_test(&file->f_count)) {
		file_list_lock();
		list_del(&file->f_list);
		list_add(&file->f_list, &free_list);
		files_stat.nr_free_files++;
		file_list_unlock();
	}
}

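/*
 * Move a file to another list (typically a superblock's s_files
 * list), under files_lock.  A NULL list is a no-op.
 */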
void file_move(struct file *file, struct list_head *list)
{
	if (!list)
		return;
	file_list_lock();
	list_del(&file->f_list);
	list_add(&file->f_list, list);
	file_list_unlock();
}

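/*
 * Returns 1 if the filesystem may safely be remounted read-only:
 * no file on the superblock is open for writing and none has a
 * pending delete.  Returns 0 otherwise.
 */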
int fs_may_remount_ro(struct super_block *sb)
{
	struct list_head *p;

	/* Check that no files are currently opened for writing. */
	file_list_lock();
	for (p = sb->s_files.next; p != &sb->s_files; p = p->next) {
		struct file *file = list_entry(p, struct file, f_list);
		struct inode *inode = file->f_dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	}
	file_list_unlock();
	return 1; /* Tis' cool bro. */
too_bad:
	file_list_unlock();
	return 0;
}

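/*
 * Called once at boot to size the system-wide file-max limit from
 * the number of available memory pages.
 */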
void __init files_init(unsigned long mempages)
{
	int n;
	/* One file with associated inode and dcache is very roughly 1K.
	 * By default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = n;
	if (files_stat.max_files < NR_FILE)
		files_stat.max_files = NR_FILE;
}