/*
 *  linux/fs/minix/dir.c
 *
 *  Copyright (C) 1991, 1992 Linus Torvalds
 *
 *  minix directory handling functions
 *
 *  Updated to filesystem version 3 by Daniel Aragones
 */

#include "minix.h"
#include <linux/buffer_head.h>
#include <linux/highmem.h>
#include <linux/swap.h>

typedef struct minix_dir_entry minix_dirent;
typedef struct minix3_dir_entry minix3_dirent;

static int minix_readdir(struct file *, void *, filldir_t);

const struct file_operations minix_dir_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.readdir	= minix_readdir,
	.fsync		= generic_file_fsync,
};

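/*
 * Drop the kmap() taken by dir_get_page() and release the page
 * cache reference.
 */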
static inline void dir_put_page(struct page *page)
{
	kunmap(page);
	page_cache_release(page);
}

/*
 * Return the offset into page `page_nr' of the last valid
 * byte in that page, plus one.
 */
static unsigned
minix_last_byte(struct inode *inode, unsigned long page_nr)
{
	unsigned last_byte = PAGE_CACHE_SIZE;

	if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT))
		last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1);
	return last_byte;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
}

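/*
 * Finish a write to a directory chunk: complete the block write,
 * grow i_size if the chunk extends the directory, and either write
 * the page out synchronously (for DIRSYNC directories) or just
 * unlock it.  Returns 0 or a negative error.
 */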
static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;
	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos+len > dir->i_size) {
		i_size_write(dir, pos+len);
		mark_inode_dirty(dir);
	}
	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);
	else
		unlock_page(page);
	return err;
}

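/*
 * Read directory page @n through the page cache and kmap() it.
 * A successful return must be paired with dir_put_page().
 */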
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_mapping_page(mapping, n, NULL);
	if (!IS_ERR(page))
		kmap(page);
	return page;
}

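/* Advance to the next directory entry, s_dirsize bytes further on. */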
static inline void *minix_next_entry(void *de, struct minix_sb_info *sbi)
{
	return (void*)((char*)de + sbi->s_dirsize);
}

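/*
 * Walk the directory page by page and feed every in-use entry to
 * filldir(), starting from f_pos rounded up to a chunk boundary.
 */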
static int minix_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
	unsigned long pos = filp->f_pos;
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	unsigned offset = pos & ~PAGE_CACHE_MASK;
	unsigned long n = pos >> PAGE_CACHE_SHIFT;
	unsigned long npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(sb);
	unsigned chunk_size = sbi->s_dirsize;
	char *name;
	__u32 inumber;

	pos = (pos + chunk_size-1) & ~(chunk_size-1);
	if (pos >= inode->i_size)
		goto done;

	for ( ; n < npages; n++, offset = 0) {
		char *p, *kaddr, *limit;
		struct page *page = dir_get_page(inode, n);

		if (IS_ERR(page))
			continue;
		kaddr = (char *)page_address(page);
		p = kaddr+offset;
		limit = kaddr + minix_last_byte(inode, n) - chunk_size;
		for ( ; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}
			if (inumber) {
				int over;

				unsigned l = strnlen(name, sbi->s_namelen);
				offset = p - kaddr;
				over = filldir(dirent, name, l,
					(n << PAGE_CACHE_SHIFT) | offset,
					inumber, DT_UNKNOWN);
				if (over) {
					dir_put_page(page);
					goto done;
				}
			}
		}
		dir_put_page(page);
	}

done:
	filp->f_pos = (n << PAGE_CACHE_SHIFT) | offset;
	return 0;
}

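/*
 * Compare a name of length @len against an on-disk name field of at
 * most @maxlen bytes; the on-disk name must not continue past @len.
 */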
static inline int namecompare(int len, int maxlen,
	const char * name, const char * buffer)
{
	if (len < maxlen && buffer[len])
		return 0;
	return !memcmp(name, buffer, len);
}

/*
 *	minix_find_entry()
 *
 * finds an entry in the specified directory with the wanted name. It
 * returns the page in which the entry was found (through the res_page
 * parameter), and a pointer to the entry itself. It does NOT read the
 * inode of the entry - you'll have to do that yourself if you want to.
 */
minix_dirent *minix_find_entry(struct dentry *dentry, struct page **res_page)
{
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct inode * dir = dentry->d_parent->d_inode;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	unsigned long n;
	unsigned long npages = dir_pages(dir);
	struct page *page = NULL;
	char *p;

	char *namx;
	__u32 inumber;
	*res_page = NULL;

	for (n = 0; n < npages; n++) {
		char *kaddr, *limit;

		page = dir_get_page(dir, n);
		if (IS_ERR(page))
			continue;

		kaddr = (char*)page_address(page);
		limit = kaddr + minix_last_byte(dir, n) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				namx = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				namx = de->name;
				inumber = de->inode;
			}
			if (!inumber)
				continue;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto found;
		}
		dir_put_page(page);
	}
	return NULL;

found:
	*res_page = page;
	return (minix_dirent *)p;
}

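/*
 * Add a directory entry for @dentry referring to @inode, reusing a
 * free slot if one exists or extending the directory by one chunk.
 * Returns 0 on success, -EEXIST if the name is already present, or
 * another negative error.
 */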
int minix_add_link(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = dentry->d_parent->d_inode;
	const char * name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct super_block * sb = dir->i_sb;
	struct minix_sb_info * sbi = minix_sb(sb);
	struct page *page = NULL;
	unsigned long npages = dir_pages(dir);
	unsigned long n;
	char *kaddr, *p;
	minix_dirent *de;
	minix3_dirent *de3;
	loff_t pos;
	int err;
	char *namx = NULL;
	__u32 inumber;

	/*
	 * We take care of directory expansion in the same loop.
	 * This code plays outside i_size, so it locks the page
	 * to protect that region.
	 */
	for (n = 0; n <= npages; n++) {
		char *limit, *dir_end;

		page = dir_get_page(dir, n);
		err = PTR_ERR(page);
		if (IS_ERR(page))
			goto out;
		lock_page(page);
		kaddr = (char*)page_address(page);
		dir_end = kaddr + minix_last_byte(dir, n);
		limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			de = (minix_dirent *)p;
			de3 = (minix3_dirent *)p;
			if (sbi->s_version == MINIX_V3) {
				namx = de3->name;
				inumber = de3->inode;
			} else {
				namx = de->name;
				inumber = de->inode;
			}
			if (p == dir_end) {
				/* We hit i_size */
				if (sbi->s_version == MINIX_V3)
					de3->inode = 0;
				else
					de->inode = 0;
				goto got_it;
			}
			if (!inumber)
				goto got_it;
			err = -EEXIST;
			if (namecompare(namelen, sbi->s_namelen, name, namx))
				goto out_unlock;
		}
		unlock_page(page);
		dir_put_page(page);
	}
	BUG();
	return -EINVAL;

got_it:
	pos = page_offset(page) + p - (char *)page_address(page);
	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
	if (err)
		goto out_unlock;
	memcpy (namx, name, namelen);
	if (sbi->s_version == MINIX_V3) {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 4);
		de3->inode = inode->i_ino;
	} else {
		memset (namx + namelen, 0, sbi->s_dirsize - namelen - 2);
		de->inode = inode->i_ino;
	}
	err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
out_put:
	dir_put_page(page);
out:
	return err;
out_unlock:
	unlock_page(page);
	goto out_put;
}

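/*
 * Clear the inode number of an existing directory entry and commit
 * the change.  The page reference is dropped in all cases.
 */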
int minix_delete_entry(struct minix_dir_entry *de, struct page *page)
{
	struct inode *inode = page->mapping->host;
	char *kaddr = page_address(page);
	loff_t pos = page_offset(page) + (char*)de - kaddr;
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	unsigned len = sbi->s_dirsize;
	int err;

	lock_page(page);
	err = minix_prepare_chunk(page, pos, len);
	if (err == 0) {
		if (sbi->s_version == MINIX_V3)
			((minix3_dirent *) de)->inode = 0;
		else
			de->inode = 0;
		err = dir_commit_chunk(page, pos, len);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	inode->i_ctime = inode->i_mtime = CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	return err;
}

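/*
 * Fill the first directory chunk of a fresh directory with the "."
 * and ".." entries.
 */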
int minix_make_empty(struct inode *inode, struct inode *dir)
{
	struct page *page = grab_cache_page(inode->i_mapping, 0);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *kaddr;
	int err;

	if (!page)
		return -ENOMEM;
	err = minix_prepare_chunk(page, 0, 2 * sbi->s_dirsize);
	if (err) {
		unlock_page(page);
		goto fail;
	}

	kaddr = kmap_atomic(page, KM_USER0);
	memset(kaddr, 0, PAGE_CACHE_SIZE);

	if (sbi->s_version == MINIX_V3) {
		minix3_dirent *de3 = (minix3_dirent *)kaddr;

		de3->inode = inode->i_ino;
		strcpy(de3->name, ".");
		de3 = minix_next_entry(de3, sbi);
		de3->inode = dir->i_ino;
		strcpy(de3->name, "..");
	} else {
		minix_dirent *de = (minix_dirent *)kaddr;

		de->inode = inode->i_ino;
		strcpy(de->name, ".");
		de = minix_next_entry(de, sbi);
		de->inode = dir->i_ino;
		strcpy(de->name, "..");
	}
	kunmap_atomic(kaddr, KM_USER0);

	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
fail:
	page_cache_release(page);
	return err;
}

/*
 * routine to check that the specified directory is empty (for rmdir)
 */
int minix_empty_dir(struct inode * inode)
{
	struct page *page = NULL;
	unsigned long i, npages = dir_pages(inode);
	struct minix_sb_info *sbi = minix_sb(inode->i_sb);
	char *name;
	__u32 inumber;

	for (i = 0; i < npages; i++) {
		char *p, *kaddr, *limit;

		page = dir_get_page(inode, i);
		if (IS_ERR(page))
			continue;

		kaddr = (char *)page_address(page);
		limit = kaddr + minix_last_byte(inode, i) - sbi->s_dirsize;
		for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) {
			if (sbi->s_version == MINIX_V3) {
				minix3_dirent *de3 = (minix3_dirent *)p;
				name = de3->name;
				inumber = de3->inode;
			} else {
				minix_dirent *de = (minix_dirent *)p;
				name = de->name;
				inumber = de->inode;
			}

			if (inumber != 0) {
				/* check for . and .. */
				if (name[0] != '.')
					goto not_empty;
				if (!name[1]) {
					if (inumber != inode->i_ino)
						goto not_empty;
				} else if (name[1] != '.')
					goto not_empty;
				else if (name[2])
					goto not_empty;
			}
		}
		dir_put_page(page);
	}
	return 1;

not_empty:
	dir_put_page(page);
	return 0;
}


/* Releases the page */
void minix_set_link(struct minix_dir_entry *de, struct page *page,
	struct inode *inode)
{
	struct inode *dir = page->mapping->host;
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	loff_t pos = page_offset(page) +
			(char *)de-(char*)page_address(page);
	int err;

	lock_page(page);

	err = minix_prepare_chunk(page, pos, sbi->s_dirsize);
	if (err == 0) {
		if (sbi->s_version == MINIX_V3)
			((minix3_dirent *) de)->inode = inode->i_ino;
		else
			de->inode = inode->i_ino;
		err = dir_commit_chunk(page, pos, sbi->s_dirsize);
	} else {
		unlock_page(page);
	}
	dir_put_page(page);
	dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
	mark_inode_dirty(dir);
}

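/*
 * Return the ".." entry of @dir (the second slot of page 0) and hand
 * back the page through *p; returns NULL if the page cannot be read.
 */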
struct minix_dir_entry * minix_dotdot (struct inode *dir, struct page **p)
{
	struct page *page = dir_get_page(dir, 0);
	struct minix_sb_info *sbi = minix_sb(dir->i_sb);
	struct minix_dir_entry *de = NULL;

	if (!IS_ERR(page)) {
		de = minix_next_entry(page_address(page), sbi);
		*p = page;
	}
	return de;
}

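/*
 * Look up @dentry in its parent directory and return the inode
 * number stored in the matching entry, or 0 if it is not found.
 */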
ino_t minix_inode_by_name(struct dentry *dentry)
{
	struct page *page;
	struct minix_dir_entry *de = minix_find_entry(dentry, &page);
	ino_t res = 0;

	if (de) {
		struct address_space *mapping = page->mapping;
		struct inode *inode = mapping->host;
		struct minix_sb_info *sbi = minix_sb(inode->i_sb);

		if (sbi->s_version == MINIX_V3)
			res = ((minix3_dirent *) de)->inode;
		else
			res = de->inode;
		dir_put_page(page);
	}
	return res;
}