/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};

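/*
 * nilfs_inode_add_blocks - account @n newly allocated blocks to @inode,
 * adding the corresponding bytes to the inode and, if the inode belongs
 * to a nilfs_root, bumping the per-root block counter as well.
 */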
void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_add(n, &root->blocks_count);
}

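/*
 * nilfs_inode_sub_blocks - counterpart of nilfs_inode_add_blocks();
 * subtracts @n blocks from the inode byte count and from the per-root
 * block counter.
 */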
void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether to allocate the block if it has not been
 *      allocated yet.
 *
 * This function does not issue the actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not an error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

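/*
 * nilfs_writepages - writepages() method of nilfs_aops.  For data
 * integrity writeback (WB_SYNC_ALL), construct a dsync segment covering
 * the requested range; otherwise leave the work to the log writer.
 */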
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

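/*
 * nilfs_writepage - writepage() method of nilfs_aops.  Dirty pages are
 * written out only through segment construction, so redirty the page,
 * unlock it, and either construct a segment (sync mode) or request a
 * flush of the segment containing this inode (reclaim).
 */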
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

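/*
 * nilfs_set_page_dirty - set_page_dirty() method of nilfs_aops.  Besides
 * marking the page dirty, mark its mapped buffers dirty and register the
 * inode as having dirty data blocks.
 */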
static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c, where
		 * the call sites of mark_buffer_dirty() are protected
		 * by the page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

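/*
 * nilfs_write_begin - write_begin() method of nilfs_aops.  Open a
 * transaction for the buffered write and prepare the page with
 * block_write_begin(); the transaction is aborted if preparation fails.
 */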
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)

{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		loff_t isize = mapping->host->i_size;
		if (pos + len > isize)
			vmtruncate(mapping->host, isize);

		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

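/*
 * nilfs_write_end - write_end() method of nilfs_aops.  Count the buffers
 * newly dirtied by this write, complete it with generic_write_end(),
 * register the dirty blocks, and commit the transaction opened in
 * nilfs_write_begin().
 */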
static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}

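/*
 * nilfs_direct_IO - direct_IO() method of nilfs_aops.  Direct writes are
 * not supported (returning 0 makes them fall back to buffered writes);
 * direct reads go through blockdev_direct_IO() using nilfs_get_block().
 */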
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				  nilfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			vmtruncate(inode, isize);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};

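/*
 * nilfs_new_inode - allocate a new inode under directory @dir with mode
 * @mode.  Allocates an on-disk inode from the ifile and initializes the
 * in-core inode (owner, flags, generation number) and, for regular
 * files, directories and symlinks, the bmap.
 */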
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occurs. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

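/*
 * nilfs_set_inode_flags - propagate NILFS on-disk inode flags to the
 * corresponding VFS i_flags bits, and exclude __GFP_FS from the page
 * cache allocation mask.
 */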
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

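/*
 * nilfs_read_inode_common - copy inode attributes from the on-disk
 * nilfs_inode @raw_inode into the in-core @inode, and read the bmap for
 * regular files, directories and symlinks.
 */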
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le32_to_cpu(raw_inode->i_uid);
	inode->i_gid = (gid_t)le32_to_cpu(raw_inode->i_gid);
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

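/*
 * __nilfs_read_inode - read the on-disk inode @ino from the ifile of
 * @root and initialize @inode with it, setting up the inode operations
 * according to the file type.
 */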
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

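/*
 * nilfs_iget - get the inode @ino belonging to @root; the inode is read
 * in from the ifile if it is not already cached.  Returns an ERR_PTR on
 * failure.
 */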
struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

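/*
 * nilfs_iget_for_gc - get a GC inode for inode number @ino and checkpoint
 * number @cno; such inodes are distinguished from regular ones by the
 * NILFS_I_GCINODE state bit and have no nilfs_root.
 */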
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

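/*
 * nilfs_write_inode_common - copy attributes of the in-core @inode into
 * the on-disk @raw_inode; the bmap is written out as well when @has_bmap
 * is nonzero.
 */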
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(inode->i_uid);
	raw_inode->i_gid = cpu_to_le32(inode->i_gid);
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

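/*
 * nilfs_update_inode - write the in-core @inode into its slot in the
 * ifile block @ibh; newly created inodes have their on-disk entry
 * cleared first.
 */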
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

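/*
 * nilfs_truncate - truncate blocks of a regular file beyond the new
 * i_size within a transaction; the bmap is shrunk in chunks of
 * NILFS_MAX_TRUNCATE_BLOCKS by nilfs_truncate_bmap().
 */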
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

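/*
 * nilfs_evict_inode - evict_inode() method for NILFS.  For inodes with
 * no remaining links, truncate all blocks and delete the on-disk inode
 * from the ifile within a transaction; otherwise just release the
 * in-core resources.
 */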
void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	end_writeback(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

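/*
 * nilfs_setattr - change attributes of an inode within a transaction;
 * size changes go through vmtruncate() and mode changes update the ACL.
 */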
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);

		err = vmtruncate(inode, iattr->ia_size);
		if (unlikely(err))
			goto out_err;
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;
	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}

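/*
 * nilfs_load_inode_block - get and pin the ifile block that holds the
 * on-disk inode of @inode; the buffer head is cached in i_bh and an
 * extra reference is returned through @pbh.
 */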
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}

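/*
 * nilfs_set_file_dirty - account @nr_dirty newly dirtied data blocks of
 * @inode and, if necessary, put the inode on the list of dirty files so
 * that it is picked up at the next segment construction.
 */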
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

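/*
 * nilfs_mark_inode_dirty - write the in-core inode into its ifile block
 * and mark both the block and the ifile dirty so that the change is
 * included in the next segment.
 */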
int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from the in-core inode to the corresponding
 * inode entry in the inode block. This operation is excluded from the
 * segment construction. This function can be called both as a single
 * operation and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}

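/*
 * nilfs_fiemap - fiemap() method for NILFS.  Walks the bmap and the list
 * of uncommitted (delayed allocation) extents, reporting each contiguous
 * extent through fiemap_fill_next_extent().
 */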
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}