1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/f2fs/node.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/mpage.h>
11 #include <linux/sched/mm.h>
12 #include <linux/blkdev.h>
13 #include <linux/pagevec.h>
14 #include <linux/swap.h>
15
16 #include "f2fs.h"
17 #include "node.h"
18 #include "segment.h"
19 #include "xattr.h"
20 #include "iostat.h"
21 #include <trace/events/f2fs.h>
22
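/*
 * Presumably true while the background free-nid build is in flight,
 * i.e. while f2fs_build_free_nids() holds build_lock.
 */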
23 #define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
24
25 static struct kmem_cache *nat_entry_slab;
26 static struct kmem_cache *free_nid_slab;
27 static struct kmem_cache *nat_entry_set_slab;
28 static struct kmem_cache *fsync_node_entry_slab;
29
30 /*
31 * Check whether the given nid is within node id range.
32 */
33 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34 {
35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
36 set_sbi_flag(sbi, SBI_NEED_FSCK);
37 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
38 __func__, nid);
39 return -EFSCORRUPTED;
40 }
41 return 0;
42 }
43
44 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
45 {
46 struct f2fs_nm_info *nm_i = NM_I(sbi);
47 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
48 struct sysinfo val;
49 unsigned long avail_ram;
50 unsigned long mem_size = 0;
51 bool res = false;
52
53 if (!nm_i)
54 return true;
55
56 si_meminfo(&val);
57
58 /* only uses low memory */
59 avail_ram = val.totalram - val.totalhigh;
60
61 /*
62 * give 25%, 25%, 50%, 50%, 50% of low memory to each component, respectively
63 */
64 if (type == FREE_NIDS) {
65 mem_size = (nm_i->nid_cnt[FREE_NID] *
66 sizeof(struct free_nid)) >> PAGE_SHIFT;
67 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
68 } else if (type == NAT_ENTRIES) {
69 mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
70 sizeof(struct nat_entry)) >> PAGE_SHIFT;
71 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
72 if (excess_cached_nats(sbi))
73 res = false;
74 } else if (type == DIRTY_DENTS) {
75 if (sbi->sb->s_bdi->wb.dirty_exceeded)
76 return false;
77 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
78 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
79 } else if (type == INO_ENTRIES) {
80 int i;
81
82 for (i = 0; i < MAX_INO_ENTRY; i++)
83 mem_size += sbi->im[i].ino_num *
84 sizeof(struct ino_entry);
85 mem_size >>= PAGE_SHIFT;
86 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
87 } else if (type == EXTENT_CACHE) {
88 mem_size = (atomic_read(&sbi->total_ext_tree) *
89 sizeof(struct extent_tree) +
90 atomic_read(&sbi->total_ext_node) *
91 sizeof(struct extent_node)) >> PAGE_SHIFT;
92 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
93 } else if (type == DISCARD_CACHE) {
94 mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
95 sizeof(struct discard_cmd)) >> PAGE_SHIFT;
96 res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
97 } else if (type == COMPRESS_PAGE) {
98 #ifdef CONFIG_F2FS_FS_COMPRESSION
99 unsigned long free_ram = val.freeram;
100
101 /*
102 * if free memory is lower than the watermark, or the cached page
103 * count exceeds the threshold, deny caching compressed pages.
104 */
105 res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
106 (COMPRESS_MAPPING(sbi)->nrpages <
107 free_ram * sbi->compress_percent / 100);
108 #else
109 res = false;
110 #endif
111 } else {
112 if (!sbi->sb->s_bdi->wb.dirty_exceeded)
113 return true;
114 }
115 return res;
116 }
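/*
 * Worked example for the thresholds above (illustrative numbers, not
 * from the source): with avail_ram == 262144 pages (1 GiB of 4 KiB
 * pages) and ram_thresh == 10, FREE_NIDS may cache up to
 * (262144 * 10 / 100) >> 2 == 6553 pages (~25 MiB) of free_nid objects
 * before this helper reports memory pressure; DIRTY_DENTS gets twice
 * that budget.
 */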
117
118 static void clear_node_page_dirty(struct page *page)
119 {
120 if (PageDirty(page)) {
121 f2fs_clear_page_cache_dirty_tag(page);
122 clear_page_dirty_for_io(page);
123 dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
124 }
125 ClearPageUptodate(page);
126 }
127
128 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
129 {
130 return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
131 }
132
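/*
 * NAT blocks are kept as two checkpointed copies: next_nat_addr() below
 * selects the shadow copy and set_to_next_nat() flips the per-block bit
 * so that later lookups read the freshly written block.
 */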
133 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
134 {
135 struct page *src_page;
136 struct page *dst_page;
137 pgoff_t dst_off;
138 void *src_addr;
139 void *dst_addr;
140 struct f2fs_nm_info *nm_i = NM_I(sbi);
141
142 dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
143
144 /* get current nat block page with lock */
145 src_page = get_current_nat_page(sbi, nid);
146 if (IS_ERR(src_page))
147 return src_page;
148 dst_page = f2fs_grab_meta_page(sbi, dst_off);
149 f2fs_bug_on(sbi, PageDirty(src_page));
150
151 src_addr = page_address(src_page);
152 dst_addr = page_address(dst_page);
153 memcpy(dst_addr, src_addr, PAGE_SIZE);
154 set_page_dirty(dst_page);
155 f2fs_put_page(src_page, 1);
156
157 set_to_next_nat(nm_i, nid);
158
159 return dst_page;
160 }
161
162 static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
163 nid_t nid, bool no_fail)
164 {
165 struct nat_entry *new;
166
167 new = f2fs_kmem_cache_alloc(nat_entry_slab,
168 GFP_F2FS_ZERO, no_fail, sbi);
169 if (new) {
170 nat_set_nid(new, nid);
171 nat_reset_flag(new);
172 }
173 return new;
174 }
175
176 static void __free_nat_entry(struct nat_entry *e)
177 {
178 kmem_cache_free(nat_entry_slab, e);
179 }
180
181 /* must be locked by nat_tree_lock */
182 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
183 struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
184 {
185 if (no_fail)
186 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
187 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
188 return NULL;
189
190 if (raw_ne)
191 node_info_from_raw_nat(&ne->ni, raw_ne);
192
193 spin_lock(&nm_i->nat_list_lock);
194 list_add_tail(&ne->list, &nm_i->nat_entries);
195 spin_unlock(&nm_i->nat_list_lock);
196
197 nm_i->nat_cnt[TOTAL_NAT]++;
198 nm_i->nat_cnt[RECLAIMABLE_NAT]++;
199 return ne;
200 }
201
202 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
203 {
204 struct nat_entry *ne;
205
206 ne = radix_tree_lookup(&nm_i->nat_root, n);
207
208 /* for a recently accessed nat entry, move it to the tail of the LRU list */
209 if (ne && !get_nat_flag(ne, IS_DIRTY)) {
210 spin_lock(&nm_i->nat_list_lock);
211 if (!list_empty(&ne->list))
212 list_move_tail(&ne->list, &nm_i->nat_entries);
213 spin_unlock(&nm_i->nat_list_lock);
214 }
215
216 return ne;
217 }
218
219 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
220 nid_t start, unsigned int nr, struct nat_entry **ep)
221 {
222 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
223 }
224
225 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
226 {
227 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
228 nm_i->nat_cnt[TOTAL_NAT]--;
229 nm_i->nat_cnt[RECLAIMABLE_NAT]--;
230 __free_nat_entry(e);
231 }
232
233 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
234 struct nat_entry *ne)
235 {
236 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
237 struct nat_entry_set *head;
238
239 head = radix_tree_lookup(&nm_i->nat_set_root, set);
240 if (!head) {
241 head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
242 GFP_NOFS, true, NULL);
243
244 INIT_LIST_HEAD(&head->entry_list);
245 INIT_LIST_HEAD(&head->set_list);
246 head->set = set;
247 head->entry_cnt = 0;
248 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
249 }
250 return head;
251 }
252
253 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
254 struct nat_entry *ne)
255 {
256 struct nat_entry_set *head;
257 bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
258
259 if (!new_ne)
260 head = __grab_nat_entry_set(nm_i, ne);
261
262 /*
263 * update entry_cnt under the following conditions:
264 * 1. update NEW_ADDR to valid block address;
265 * 2. update old block address to new one;
266 */
267 if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
268 !get_nat_flag(ne, IS_DIRTY)))
269 head->entry_cnt++;
270
271 set_nat_flag(ne, IS_PREALLOC, new_ne);
272
273 if (get_nat_flag(ne, IS_DIRTY))
274 goto refresh_list;
275
276 nm_i->nat_cnt[DIRTY_NAT]++;
277 nm_i->nat_cnt[RECLAIMABLE_NAT]--;
278 set_nat_flag(ne, IS_DIRTY, true);
279 refresh_list:
280 spin_lock(&nm_i->nat_list_lock);
281 if (new_ne)
282 list_del_init(&ne->list);
283 else
284 list_move_tail(&ne->list, &head->entry_list);
285 spin_unlock(&nm_i->nat_list_lock);
286 }
287
288 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
289 struct nat_entry_set *set, struct nat_entry *ne)
290 {
291 spin_lock(&nm_i->nat_list_lock);
292 list_move_tail(&ne->list, &nm_i->nat_entries);
293 spin_unlock(&nm_i->nat_list_lock);
294
295 set_nat_flag(ne, IS_DIRTY, false);
296 set->entry_cnt--;
297 nm_i->nat_cnt[DIRTY_NAT]--;
298 nm_i->nat_cnt[RECLAIMABLE_NAT]++;
299 }
300
301 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
302 nid_t start, unsigned int nr, struct nat_entry_set **ep)
303 {
304 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
305 start, nr);
306 }
307
308 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
309 {
310 return NODE_MAPPING(sbi) == page->mapping &&
311 IS_DNODE(page) && is_cold_node(page);
312 }
313
314 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
315 {
316 spin_lock_init(&sbi->fsync_node_lock);
317 INIT_LIST_HEAD(&sbi->fsync_node_list);
318 sbi->fsync_seg_id = 0;
319 sbi->fsync_node_num = 0;
320 }
321
322 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
323 struct page *page)
324 {
325 struct fsync_node_entry *fn;
326 unsigned long flags;
327 unsigned int seq_id;
328
329 fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
330 GFP_NOFS, true, NULL);
331
332 get_page(page);
333 fn->page = page;
334 INIT_LIST_HEAD(&fn->list);
335
336 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
337 list_add_tail(&fn->list, &sbi->fsync_node_list);
338 fn->seq_id = sbi->fsync_seg_id++;
339 seq_id = fn->seq_id;
340 sbi->fsync_node_num++;
341 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
342
343 return seq_id;
344 }
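/*
 * The returned seq_id totally orders fsync'd node pages;
 * f2fs_wait_on_node_pages_writeback() below waits for every entry whose
 * seq_id does not exceed a given one.
 */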
345
346 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
347 {
348 struct fsync_node_entry *fn;
349 unsigned long flags;
350
351 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
352 list_for_each_entry(fn, &sbi->fsync_node_list, list) {
353 if (fn->page == page) {
354 list_del(&fn->list);
355 sbi->fsync_node_num--;
356 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
357 kmem_cache_free(fsync_node_entry_slab, fn);
358 put_page(page);
359 return;
360 }
361 }
362 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
363 f2fs_bug_on(sbi, 1);
364 }
365
366 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
367 {
368 unsigned long flags;
369
370 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
371 sbi->fsync_seg_id = 0;
372 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
373 }
374
375 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
376 {
377 struct f2fs_nm_info *nm_i = NM_I(sbi);
378 struct nat_entry *e;
379 bool need = false;
380
381 f2fs_down_read(&nm_i->nat_tree_lock);
382 e = __lookup_nat_cache(nm_i, nid);
383 if (e) {
384 if (!get_nat_flag(e, IS_CHECKPOINTED) &&
385 !get_nat_flag(e, HAS_FSYNCED_INODE))
386 need = true;
387 }
388 f2fs_up_read(&nm_i->nat_tree_lock);
389 return need;
390 }
391
392 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
393 {
394 struct f2fs_nm_info *nm_i = NM_I(sbi);
395 struct nat_entry *e;
396 bool is_cp = true;
397
398 f2fs_down_read(&nm_i->nat_tree_lock);
399 e = __lookup_nat_cache(nm_i, nid);
400 if (e && !get_nat_flag(e, IS_CHECKPOINTED))
401 is_cp = false;
402 f2fs_up_read(&nm_i->nat_tree_lock);
403 return is_cp;
404 }
405
406 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
407 {
408 struct f2fs_nm_info *nm_i = NM_I(sbi);
409 struct nat_entry *e;
410 bool need_update = true;
411
412 f2fs_down_read(&nm_i->nat_tree_lock);
413 e = __lookup_nat_cache(nm_i, ino);
414 if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
415 (get_nat_flag(e, IS_CHECKPOINTED) ||
416 get_nat_flag(e, HAS_FSYNCED_INODE)))
417 need_update = false;
418 f2fs_up_read(&nm_i->nat_tree_lock);
419 return need_update;
420 }
421
422 /* must be locked by nat_tree_lock */
423 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
424 struct f2fs_nat_entry *ne)
425 {
426 struct f2fs_nm_info *nm_i = NM_I(sbi);
427 struct nat_entry *new, *e;
428
429 /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
430 if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
431 return;
432
433 new = __alloc_nat_entry(sbi, nid, false);
434 if (!new)
435 return;
436
437 f2fs_down_write(&nm_i->nat_tree_lock);
438 e = __lookup_nat_cache(nm_i, nid);
439 if (!e)
440 e = __init_nat_entry(nm_i, new, ne, false);
441 else
442 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
443 nat_get_blkaddr(e) !=
444 le32_to_cpu(ne->block_addr) ||
445 nat_get_version(e) != ne->version);
446 f2fs_up_write(&nm_i->nat_tree_lock);
447 if (e != new)
448 __free_nat_entry(new);
449 }
450
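/*
 * Address transitions enforced by the sanity checks below:
 * NULL_ADDR (free) -> NEW_ADDR (preallocated) -> valid blkaddr
 * (written) -> NULL_ADDR (truncated); the node version is bumped on the
 * last transition so a reused nid can be distinguished afterwards.
 */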
451 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
452 block_t new_blkaddr, bool fsync_done)
453 {
454 struct f2fs_nm_info *nm_i = NM_I(sbi);
455 struct nat_entry *e;
456 struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);
457
458 f2fs_down_write(&nm_i->nat_tree_lock);
459 e = __lookup_nat_cache(nm_i, ni->nid);
460 if (!e) {
461 e = __init_nat_entry(nm_i, new, NULL, true);
462 copy_node_info(&e->ni, ni);
463 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
464 } else if (new_blkaddr == NEW_ADDR) {
465 /*
466 * when a nid is reallocated, the previous nat entry can
467 * remain in the nat cache; reinitialize it with the new
468 * information.
469 */
470 copy_node_info(&e->ni, ni);
471 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
472 }
473 /* let's free early to reduce memory consumption */
474 if (e != new)
475 __free_nat_entry(new);
476
477 /* sanity check */
478 f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
479 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
480 new_blkaddr == NULL_ADDR);
481 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
482 new_blkaddr == NEW_ADDR);
483 f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
484 new_blkaddr == NEW_ADDR);
485
486 /* increment the version number, as the node is being removed */
487 if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
488 unsigned char version = nat_get_version(e);
489
490 nat_set_version(e, inc_node_version(version));
491 }
492
493 /* change address */
494 nat_set_blkaddr(e, new_blkaddr);
495 if (!__is_valid_data_blkaddr(new_blkaddr))
496 set_nat_flag(e, IS_CHECKPOINTED, false);
497 __set_nat_cache_dirty(nm_i, e);
498
499 /* update fsync_mark if its inode nat entry is still alive */
500 if (ni->nid != ni->ino)
501 e = __lookup_nat_cache(nm_i, ni->ino);
502 if (e) {
503 if (fsync_done && ni->nid == ni->ino)
504 set_nat_flag(e, HAS_FSYNCED_INODE, true);
505 set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
506 }
507 f2fs_up_write(&nm_i->nat_tree_lock);
508 }
509
510 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
511 {
512 struct f2fs_nm_info *nm_i = NM_I(sbi);
513 int nr = nr_shrink;
514
515 if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
516 return 0;
517
518 spin_lock(&nm_i->nat_list_lock);
519 while (nr_shrink) {
520 struct nat_entry *ne;
521
522 if (list_empty(&nm_i->nat_entries))
523 break;
524
525 ne = list_first_entry(&nm_i->nat_entries,
526 struct nat_entry, list);
527 list_del(&ne->list);
528 spin_unlock(&nm_i->nat_list_lock);
529
530 __del_from_nat_cache(nm_i, ne);
531 nr_shrink--;
532
533 spin_lock(&nm_i->nat_list_lock);
534 }
535 spin_unlock(&nm_i->nat_list_lock);
536
537 f2fs_up_write(&nm_i->nat_tree_lock);
538 return nr - nr_shrink;
539 }
540
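/*
 * Lookup order: the in-memory nat cache first, then the NAT journal in
 * the hot data curseg, and finally the on-disk NAT block; whatever the
 * slow paths find is inserted back into the cache.
 */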
541 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
542 struct node_info *ni, bool checkpoint_context)
543 {
544 struct f2fs_nm_info *nm_i = NM_I(sbi);
545 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
546 struct f2fs_journal *journal = curseg->journal;
547 nid_t start_nid = START_NID(nid);
548 struct f2fs_nat_block *nat_blk;
549 struct page *page = NULL;
550 struct f2fs_nat_entry ne;
551 struct nat_entry *e;
552 pgoff_t index;
553 block_t blkaddr;
554 int i;
555
556 ni->nid = nid;
557 retry:
558 /* Check nat cache */
559 f2fs_down_read(&nm_i->nat_tree_lock);
560 e = __lookup_nat_cache(nm_i, nid);
561 if (e) {
562 ni->ino = nat_get_ino(e);
563 ni->blk_addr = nat_get_blkaddr(e);
564 ni->version = nat_get_version(e);
565 f2fs_up_read(&nm_i->nat_tree_lock);
566 return 0;
567 }
568
569 /*
570 * Check the current segment summary by trying to grab journal_rwsem first.
571 * This rwsem is on the critical path of checkpoint, which also takes the
572 * above nat_tree_lock. Therefore, if we fail to grab it here, drop the
573 * lock and retry rather than stalling the checkpoint.
574 */
575 if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
576 down_read(&curseg->journal_rwsem);
577 } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
578 !down_read_trylock(&curseg->journal_rwsem)) {
579 f2fs_up_read(&nm_i->nat_tree_lock);
580 goto retry;
581 }
582
583 i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
584 if (i >= 0) {
585 ne = nat_in_journal(journal, i);
586 node_info_from_raw_nat(ni, &ne);
587 }
588 up_read(&curseg->journal_rwsem);
589 if (i >= 0) {
590 f2fs_up_read(&nm_i->nat_tree_lock);
591 goto cache;
592 }
593
594 /* Fill node_info from nat page */
595 index = current_nat_addr(sbi, nid);
596 f2fs_up_read(&nm_i->nat_tree_lock);
597
598 page = f2fs_get_meta_page(sbi, index);
599 if (IS_ERR(page))
600 return PTR_ERR(page);
601
602 nat_blk = (struct f2fs_nat_block *)page_address(page);
603 ne = nat_blk->entries[nid - start_nid];
604 node_info_from_raw_nat(ni, &ne);
605 f2fs_put_page(page, 1);
606 cache:
607 blkaddr = le32_to_cpu(ne.block_addr);
608 if (__is_valid_data_blkaddr(blkaddr) &&
609 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
610 return -EFAULT;
611
612 /* cache nat entry */
613 cache_nat_entry(sbi, nid, &ne);
614 return 0;
615 }
616
617 /*
618 * readahead up to MAX_RA_NODE node pages.
619 */
620 static void f2fs_ra_node_pages(struct page *parent, int start, int n)
621 {
622 struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
623 struct blk_plug plug;
624 int i, end;
625 nid_t nid;
626
627 blk_start_plug(&plug);
628
629 /* try readahead for siblings of the desired node */
630 end = start + n;
631 end = min(end, NIDS_PER_BLOCK);
632 for (i = start; i < end; i++) {
633 nid = get_nid(parent, i, false);
634 f2fs_ra_node_page(sbi, nid);
635 }
636
637 blk_finish_plug(&plug);
638 }
639
640 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
641 {
642 const long direct_index = ADDRS_PER_INODE(dn->inode);
643 const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
644 const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
645 unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
646 int cur_level = dn->cur_level;
647 int max_level = dn->max_level;
648 pgoff_t base = 0;
649
650 if (!dn->max_level)
651 return pgofs + 1;
652
653 while (max_level-- > cur_level)
654 skipped_unit *= NIDS_PER_BLOCK;
655
656 switch (dn->max_level) {
657 case 3:
658 base += 2 * indirect_blks;
659 fallthrough;
660 case 2:
661 base += 2 * direct_blks;
662 fallthrough;
663 case 1:
664 base += direct_index;
665 break;
666 default:
667 f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
668 }
669
670 return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
671 }
672
673 /*
674 * The maximum depth is four.
675 * Offset[0] will have raw inode offset.
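 *
 * Illustrative example (assuming the common layout where
 * ADDRS_PER_INODE == 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 * block 922 is still addressed from the inode itself (level 0), block
 * 923 maps to NODE_DIR1_BLOCK with offset 0 (level 1), and block
 * 923 + 2 * 1018 == 2959 is the first block reached through
 * NODE_IND1_BLOCK (level 2).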
676 */
677 static int get_node_path(struct inode *inode, long block,
678 int offset[4], unsigned int noffset[4])
679 {
680 const long direct_index = ADDRS_PER_INODE(inode);
681 const long direct_blks = ADDRS_PER_BLOCK(inode);
682 const long dptrs_per_blk = NIDS_PER_BLOCK;
683 const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
684 const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
685 int n = 0;
686 int level = 0;
687
688 noffset[0] = 0;
689
690 if (block < direct_index) {
691 offset[n] = block;
692 goto got;
693 }
694 block -= direct_index;
695 if (block < direct_blks) {
696 offset[n++] = NODE_DIR1_BLOCK;
697 noffset[n] = 1;
698 offset[n] = block;
699 level = 1;
700 goto got;
701 }
702 block -= direct_blks;
703 if (block < direct_blks) {
704 offset[n++] = NODE_DIR2_BLOCK;
705 noffset[n] = 2;
706 offset[n] = block;
707 level = 1;
708 goto got;
709 }
710 block -= direct_blks;
711 if (block < indirect_blks) {
712 offset[n++] = NODE_IND1_BLOCK;
713 noffset[n] = 3;
714 offset[n++] = block / direct_blks;
715 noffset[n] = 4 + offset[n - 1];
716 offset[n] = block % direct_blks;
717 level = 2;
718 goto got;
719 }
720 block -= indirect_blks;
721 if (block < indirect_blks) {
722 offset[n++] = NODE_IND2_BLOCK;
723 noffset[n] = 4 + dptrs_per_blk;
724 offset[n++] = block / direct_blks;
725 noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
726 offset[n] = block % direct_blks;
727 level = 2;
728 goto got;
729 }
730 block -= indirect_blks;
731 if (block < dindirect_blks) {
732 offset[n++] = NODE_DIND_BLOCK;
733 noffset[n] = 5 + (dptrs_per_blk * 2);
734 offset[n++] = block / indirect_blks;
735 noffset[n] = 6 + (dptrs_per_blk * 2) +
736 offset[n - 1] * (dptrs_per_blk + 1);
737 offset[n++] = (block / direct_blks) % dptrs_per_blk;
738 noffset[n] = 7 + (dptrs_per_blk * 2) +
739 offset[n - 2] * (dptrs_per_blk + 1) +
740 offset[n - 1];
741 offset[n] = block % direct_blks;
742 level = 3;
743 goto got;
744 } else {
745 return -E2BIG;
746 }
747 got:
748 return level;
749 }
750
751 /*
752 * Caller should call f2fs_put_dnode(dn).
753 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
754 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
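 * Mode summary, as implemented by the lookup loop below: LOOKUP_NODE
 * walks existing nodes only, LOOKUP_NODE_RA additionally reads ahead
 * siblings at the last level, and ALLOC_NODE allocates any node missing
 * on the path.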
755 */
756 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
757 {
758 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
759 struct page *npage[4];
760 struct page *parent = NULL;
761 int offset[4];
762 unsigned int noffset[4];
763 nid_t nids[4];
764 int level, i = 0;
765 int err = 0;
766
767 level = get_node_path(dn->inode, index, offset, noffset);
768 if (level < 0)
769 return level;
770
771 nids[0] = dn->inode->i_ino;
772 npage[0] = dn->inode_page;
773
774 if (!npage[0]) {
775 npage[0] = f2fs_get_node_page(sbi, nids[0]);
776 if (IS_ERR(npage[0]))
777 return PTR_ERR(npage[0]);
778 }
779
780 /* if inline_data is set, should not report any block indices */
781 if (f2fs_has_inline_data(dn->inode) && index) {
782 err = -ENOENT;
783 f2fs_put_page(npage[0], 1);
784 goto release_out;
785 }
786
787 parent = npage[0];
788 if (level != 0)
789 nids[1] = get_nid(parent, offset[0], true);
790 dn->inode_page = npage[0];
791 dn->inode_page_locked = true;
792
793 /* get indirect or direct nodes */
794 for (i = 1; i <= level; i++) {
795 bool done = false;
796
797 if (!nids[i] && mode == ALLOC_NODE) {
798 /* alloc new node */
799 if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
800 err = -ENOSPC;
801 goto release_pages;
802 }
803
804 dn->nid = nids[i];
805 npage[i] = f2fs_new_node_page(dn, noffset[i]);
806 if (IS_ERR(npage[i])) {
807 f2fs_alloc_nid_failed(sbi, nids[i]);
808 err = PTR_ERR(npage[i]);
809 goto release_pages;
810 }
811
812 set_nid(parent, offset[i - 1], nids[i], i == 1);
813 f2fs_alloc_nid_done(sbi, nids[i]);
814 done = true;
815 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
816 npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
817 if (IS_ERR(npage[i])) {
818 err = PTR_ERR(npage[i]);
819 goto release_pages;
820 }
821 done = true;
822 }
823 if (i == 1) {
824 dn->inode_page_locked = false;
825 unlock_page(parent);
826 } else {
827 f2fs_put_page(parent, 1);
828 }
829
830 if (!done) {
831 npage[i] = f2fs_get_node_page(sbi, nids[i]);
832 if (IS_ERR(npage[i])) {
833 err = PTR_ERR(npage[i]);
834 f2fs_put_page(npage[0], 0);
835 goto release_out;
836 }
837 }
838 if (i < level) {
839 parent = npage[i];
840 nids[i + 1] = get_nid(parent, offset[i], false);
841 }
842 }
843 dn->nid = nids[level];
844 dn->ofs_in_node = offset[level];
845 dn->node_page = npage[level];
846 dn->data_blkaddr = f2fs_data_blkaddr(dn);
847
848 if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
849 f2fs_sb_has_readonly(sbi)) {
850 unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
851 block_t blkaddr;
852
853 if (!c_len)
854 goto out;
855
856 blkaddr = f2fs_data_blkaddr(dn);
857 if (blkaddr == COMPRESS_ADDR)
858 blkaddr = data_blkaddr(dn->inode, dn->node_page,
859 dn->ofs_in_node + 1);
860
861 f2fs_update_extent_tree_range_compressed(dn->inode,
862 index, blkaddr,
863 F2FS_I(dn->inode)->i_cluster_size,
864 c_len);
865 }
866 out:
867 return 0;
868
869 release_pages:
870 f2fs_put_page(parent, 1);
871 if (i > 1)
872 f2fs_put_page(npage[0], 0);
873 release_out:
874 dn->inode_page = NULL;
875 dn->node_page = NULL;
876 if (err == -ENOENT) {
877 dn->cur_level = i;
878 dn->max_level = level;
879 dn->ofs_in_node = offset[level];
880 }
881 return err;
882 }
883
884 static int truncate_node(struct dnode_of_data *dn)
885 {
886 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
887 struct node_info ni;
888 int err;
889 pgoff_t index;
890
891 err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
892 if (err)
893 return err;
894
895 /* Deallocate node address */
896 f2fs_invalidate_blocks(sbi, ni.blk_addr);
897 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
898 set_node_addr(sbi, &ni, NULL_ADDR, false);
899
900 if (dn->nid == dn->inode->i_ino) {
901 f2fs_remove_orphan_inode(sbi, dn->nid);
902 dec_valid_inode_count(sbi);
903 f2fs_inode_synced(dn->inode);
904 }
905
906 clear_node_page_dirty(dn->node_page);
907 set_sbi_flag(sbi, SBI_IS_DIRTY);
908
909 index = dn->node_page->index;
910 f2fs_put_page(dn->node_page, 1);
911
912 invalidate_mapping_pages(NODE_MAPPING(sbi),
913 index, index);
914
915 dn->node_page = NULL;
916 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
917
918 return 0;
919 }
920
921 static int truncate_dnode(struct dnode_of_data *dn)
922 {
923 struct page *page;
924 int err;
925
926 if (dn->nid == 0)
927 return 1;
928
929 /* get direct node */
930 page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
931 if (PTR_ERR(page) == -ENOENT)
932 return 1;
933 else if (IS_ERR(page))
934 return PTR_ERR(page);
935
936 /* set up dnode_of_data as the parameter for truncation */
937 dn->node_page = page;
938 dn->ofs_in_node = 0;
939 f2fs_truncate_data_blocks(dn);
940 err = truncate_node(dn);
941 if (err)
942 return err;
943
944 return 1;
945 }
946
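/*
 * Returns the number of node blocks freed underneath, plus the indirect
 * node itself when ofs == 0; a zero nid short-circuits to the full
 * NIDS_PER_BLOCK + 1 so the caller can still advance its node offset.
 */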
947 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
948 int ofs, int depth)
949 {
950 struct dnode_of_data rdn = *dn;
951 struct page *page;
952 struct f2fs_node *rn;
953 nid_t child_nid;
954 unsigned int child_nofs;
955 int freed = 0;
956 int i, ret;
957
958 if (dn->nid == 0)
959 return NIDS_PER_BLOCK + 1;
960
961 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
962
963 page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
964 if (IS_ERR(page)) {
965 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
966 return PTR_ERR(page);
967 }
968
969 f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
970
971 rn = F2FS_NODE(page);
972 if (depth < 3) {
973 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
974 child_nid = le32_to_cpu(rn->in.nid[i]);
975 if (child_nid == 0)
976 continue;
977 rdn.nid = child_nid;
978 ret = truncate_dnode(&rdn);
979 if (ret < 0)
980 goto out_err;
981 if (set_nid(page, i, 0, false))
982 dn->node_changed = true;
983 }
984 } else {
985 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
986 for (i = ofs; i < NIDS_PER_BLOCK; i++) {
987 child_nid = le32_to_cpu(rn->in.nid[i]);
988 if (child_nid == 0) {
989 child_nofs += NIDS_PER_BLOCK + 1;
990 continue;
991 }
992 rdn.nid = child_nid;
993 ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
994 if (ret == (NIDS_PER_BLOCK + 1)) {
995 if (set_nid(page, i, 0, false))
996 dn->node_changed = true;
997 child_nofs += ret;
998 } else if (ret < 0 && ret != -ENOENT) {
999 goto out_err;
1000 }
1001 }
1002 freed = child_nofs;
1003 }
1004
1005 if (!ofs) {
1006 /* remove current indirect node */
1007 dn->node_page = page;
1008 ret = truncate_node(dn);
1009 if (ret)
1010 goto out_err;
1011 freed++;
1012 } else {
1013 f2fs_put_page(page, 1);
1014 }
1015 trace_f2fs_truncate_nodes_exit(dn->inode, freed);
1016 return freed;
1017
1018 out_err:
1019 f2fs_put_page(page, 1);
1020 trace_f2fs_truncate_nodes_exit(dn->inode, ret);
1021 return ret;
1022 }
1023
1024 static int truncate_partial_nodes(struct dnode_of_data *dn,
1025 struct f2fs_inode *ri, int *offset, int depth)
1026 {
1027 struct page *pages[2];
1028 nid_t nid[3];
1029 nid_t child_nid;
1030 int err = 0;
1031 int i;
1032 int idx = depth - 2;
1033
1034 nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1035 if (!nid[0])
1036 return 0;
1037
1038 /* get indirect nodes in the path */
1039 for (i = 0; i < idx + 1; i++) {
1040 /* the page reference count will be increased */
1041 pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
1042 if (IS_ERR(pages[i])) {
1043 err = PTR_ERR(pages[i]);
1044 idx = i - 1;
1045 goto fail;
1046 }
1047 nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
1048 }
1049
1050 f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
1051
1052 /* free direct nodes linked to a partial indirect node */
1053 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
1054 child_nid = get_nid(pages[idx], i, false);
1055 if (!child_nid)
1056 continue;
1057 dn->nid = child_nid;
1058 err = truncate_dnode(dn);
1059 if (err < 0)
1060 goto fail;
1061 if (set_nid(pages[idx], i, 0, false))
1062 dn->node_changed = true;
1063 }
1064
1065 if (offset[idx + 1] == 0) {
1066 dn->node_page = pages[idx];
1067 dn->nid = nid[idx];
1068 err = truncate_node(dn);
1069 if (err)
1070 goto fail;
1071 } else {
1072 f2fs_put_page(pages[idx], 1);
1073 }
1074 offset[idx]++;
1075 offset[idx + 1] = 0;
1076 idx--;
1077 fail:
1078 for (i = idx; i >= 0; i--)
1079 f2fs_put_page(pages[i], 1);
1080
1081 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
1082
1083 return err;
1084 }
1085
1086 /*
1087 * All the block addresses of data and nodes should be nullified.
1088 */
1089 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
1090 {
1091 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1092 int err = 0, cont = 1;
1093 int level, offset[4], noffset[4];
1094 unsigned int nofs = 0;
1095 struct f2fs_inode *ri;
1096 struct dnode_of_data dn;
1097 struct page *page;
1098
1099 trace_f2fs_truncate_inode_blocks_enter(inode, from);
1100
1101 level = get_node_path(inode, from, offset, noffset);
1102 if (level < 0) {
1103 trace_f2fs_truncate_inode_blocks_exit(inode, level);
1104 return level;
1105 }
1106
1107 page = f2fs_get_node_page(sbi, inode->i_ino);
1108 if (IS_ERR(page)) {
1109 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
1110 return PTR_ERR(page);
1111 }
1112
1113 set_new_dnode(&dn, inode, page, NULL, 0);
1114 unlock_page(page);
1115
1116 ri = F2FS_INODE(page);
1117 switch (level) {
1118 case 0:
1119 case 1:
1120 nofs = noffset[1];
1121 break;
1122 case 2:
1123 nofs = noffset[1];
1124 if (!offset[level - 1])
1125 goto skip_partial;
1126 err = truncate_partial_nodes(&dn, ri, offset, level);
1127 if (err < 0 && err != -ENOENT)
1128 goto fail;
1129 nofs += 1 + NIDS_PER_BLOCK;
1130 break;
1131 case 3:
1132 nofs = 5 + 2 * NIDS_PER_BLOCK;
1133 if (!offset[level - 1])
1134 goto skip_partial;
1135 err = truncate_partial_nodes(&dn, ri, offset, level);
1136 if (err < 0 && err != -ENOENT)
1137 goto fail;
1138 break;
1139 default:
1140 BUG();
1141 }
1142
1143 skip_partial:
1144 while (cont) {
1145 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1146 switch (offset[0]) {
1147 case NODE_DIR1_BLOCK:
1148 case NODE_DIR2_BLOCK:
1149 err = truncate_dnode(&dn);
1150 break;
1151
1152 case NODE_IND1_BLOCK:
1153 case NODE_IND2_BLOCK:
1154 err = truncate_nodes(&dn, nofs, offset[1], 2);
1155 break;
1156
1157 case NODE_DIND_BLOCK:
1158 err = truncate_nodes(&dn, nofs, offset[1], 3);
1159 cont = 0;
1160 break;
1161
1162 default:
1163 BUG();
1164 }
1165 if (err < 0 && err != -ENOENT)
1166 goto fail;
1167 if (offset[1] == 0 &&
1168 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
1169 lock_page(page);
1170 BUG_ON(page->mapping != NODE_MAPPING(sbi));
1171 f2fs_wait_on_page_writeback(page, NODE, true, true);
1172 ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
1173 set_page_dirty(page);
1174 unlock_page(page);
1175 }
1176 offset[1] = 0;
1177 offset[0]++;
1178 nofs += err;
1179 }
1180 fail:
1181 f2fs_put_page(page, 0);
1182 trace_f2fs_truncate_inode_blocks_exit(inode, err);
1183 return err > 0 ? 0 : err;
1184 }
1185
1186 /* caller must lock inode page */
1187 int f2fs_truncate_xattr_node(struct inode *inode)
1188 {
1189 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1190 nid_t nid = F2FS_I(inode)->i_xattr_nid;
1191 struct dnode_of_data dn;
1192 struct page *npage;
1193 int err;
1194
1195 if (!nid)
1196 return 0;
1197
1198 npage = f2fs_get_node_page(sbi, nid);
1199 if (IS_ERR(npage))
1200 return PTR_ERR(npage);
1201
1202 set_new_dnode(&dn, inode, NULL, npage, nid);
1203 err = truncate_node(&dn);
1204 if (err) {
1205 f2fs_put_page(npage, 1);
1206 return err;
1207 }
1208
1209 f2fs_i_xnid_write(inode, 0);
1210
1211 return 0;
1212 }
1213
1214 /*
1215 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1216 * f2fs_unlock_op().
1217 */
1218 int f2fs_remove_inode_page(struct inode *inode)
1219 {
1220 struct dnode_of_data dn;
1221 int err;
1222
1223 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1224 err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
1225 if (err)
1226 return err;
1227
1228 err = f2fs_truncate_xattr_node(inode);
1229 if (err) {
1230 f2fs_put_dnode(&dn);
1231 return err;
1232 }
1233
1234 /* remove potential inline_data blocks */
1235 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1236 S_ISLNK(inode->i_mode))
1237 f2fs_truncate_data_blocks_range(&dn, 1);
1238
1239 /* 0 is possible, after f2fs_new_inode() has failed */
1240 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
1241 f2fs_put_dnode(&dn);
1242 return -EIO;
1243 }
1244
1245 if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
1246 f2fs_warn(F2FS_I_SB(inode),
1247 "f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
1248 inode->i_ino, (unsigned long long)inode->i_blocks);
1249 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
1250 }
1251
1252 /* will put inode & node pages */
1253 err = truncate_node(&dn);
1254 if (err) {
1255 f2fs_put_dnode(&dn);
1256 return err;
1257 }
1258 return 0;
1259 }
1260
1261 struct page *f2fs_new_inode_page(struct inode *inode)
1262 {
1263 struct dnode_of_data dn;
1264
1265 /* allocate inode page for new inode */
1266 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1267
1268 /* caller should f2fs_put_page(page, 1); */
1269 return f2fs_new_node_page(&dn, 0);
1270 }
1271
1272 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1273 {
1274 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1275 struct node_info new_ni;
1276 struct page *page;
1277 int err;
1278
1279 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1280 return ERR_PTR(-EPERM);
1281
1282 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1283 if (!page)
1284 return ERR_PTR(-ENOMEM);
1285
1286 if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
1287 goto fail;
1288
1289 #ifdef CONFIG_F2FS_CHECK_FS
1290 err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
1291 if (err) {
1292 dec_valid_node_count(sbi, dn->inode, !ofs);
1293 goto fail;
1294 }
1295 if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
1296 err = -EFSCORRUPTED;
1297 set_sbi_flag(sbi, SBI_NEED_FSCK);
1298 goto fail;
1299 }
1300 #endif
1301 new_ni.nid = dn->nid;
1302 new_ni.ino = dn->inode->i_ino;
1303 new_ni.blk_addr = NULL_ADDR;
1304 new_ni.flag = 0;
1305 new_ni.version = 0;
1306 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1307
1308 f2fs_wait_on_page_writeback(page, NODE, true, true);
1309 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1310 set_cold_node(page, S_ISDIR(dn->inode->i_mode));
1311 if (!PageUptodate(page))
1312 SetPageUptodate(page);
1313 if (set_page_dirty(page))
1314 dn->node_changed = true;
1315
1316 if (f2fs_has_xattr_block(ofs))
1317 f2fs_i_xnid_write(dn->inode, dn->nid);
1318
1319 if (ofs == 0)
1320 inc_valid_inode_count(sbi);
1321 return page;
1322
1323 fail:
1324 clear_node_page_dirty(page);
1325 f2fs_put_page(page, 1);
1326 return ERR_PTR(err);
1327 }
1328
1329 /*
1330 * Caller should act as follows depending on the return value:
1331 * 0: f2fs_put_page(page, 0)
1332 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1333 */
1334 static int read_node_page(struct page *page, int op_flags)
1335 {
1336 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1337 struct node_info ni;
1338 struct f2fs_io_info fio = {
1339 .sbi = sbi,
1340 .type = NODE,
1341 .op = REQ_OP_READ,
1342 .op_flags = op_flags,
1343 .page = page,
1344 .encrypted_page = NULL,
1345 };
1346 int err;
1347
1348 if (PageUptodate(page)) {
1349 if (!f2fs_inode_chksum_verify(sbi, page)) {
1350 ClearPageUptodate(page);
1351 return -EFSBADCRC;
1352 }
1353 return LOCKED_PAGE;
1354 }
1355
1356 err = f2fs_get_node_info(sbi, page->index, &ni, false);
1357 if (err)
1358 return err;
1359
1360 /* NEW_ADDR can be seen after cp_error drops some dirty node pages */
1361 if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
1362 is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
1363 ClearPageUptodate(page);
1364 return -ENOENT;
1365 }
1366
1367 fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1368
1369 err = f2fs_submit_page_bio(&fio);
1370
1371 if (!err)
1372 f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);
1373
1374 return err;
1375 }
1376
1377 /*
1378 * Readahead a node page
1379 */
1380 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1381 {
1382 struct page *apage;
1383 int err;
1384
1385 if (!nid)
1386 return;
1387 if (f2fs_check_nid_range(sbi, nid))
1388 return;
1389
1390 apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
1391 if (apage)
1392 return;
1393
1394 apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1395 if (!apage)
1396 return;
1397
1398 err = read_node_page(apage, REQ_RAHEAD);
1399 f2fs_put_page(apage, err ? 1 : 0);
1400 }
1401
1402 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1403 struct page *parent, int start)
1404 {
1405 struct page *page;
1406 int err;
1407
1408 if (!nid)
1409 return ERR_PTR(-ENOENT);
1410 if (f2fs_check_nid_range(sbi, nid))
1411 return ERR_PTR(-EINVAL);
1412 repeat:
1413 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1414 if (!page)
1415 return ERR_PTR(-ENOMEM);
1416
1417 err = read_node_page(page, 0);
1418 if (err < 0) {
1419 goto out_put_err;
1420 } else if (err == LOCKED_PAGE) {
1421 err = 0;
1422 goto page_hit;
1423 }
1424
1425 if (parent)
1426 f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
1427
1428 lock_page(page);
1429
1430 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1431 f2fs_put_page(page, 1);
1432 goto repeat;
1433 }
1434
1435 if (unlikely(!PageUptodate(page))) {
1436 err = -EIO;
1437 goto out_err;
1438 }
1439
1440 if (!f2fs_inode_chksum_verify(sbi, page)) {
1441 err = -EFSBADCRC;
1442 goto out_err;
1443 }
1444 page_hit:
1445 if (likely(nid == nid_of_node(page)))
1446 return page;
1447
1448 f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1449 nid, nid_of_node(page), ino_of_node(page),
1450 ofs_of_node(page), cpver_of_node(page),
1451 next_blkaddr_of_node(page));
1452 set_sbi_flag(sbi, SBI_NEED_FSCK);
1453 err = -EINVAL;
1454 out_err:
1455 ClearPageUptodate(page);
1456 out_put_err:
1457 /* -ENOENT from read_node_page is not an error, so skip the EIO handling */
1458 if (err != -ENOENT)
1459 f2fs_handle_page_eio(sbi, page->index, NODE);
1460 f2fs_put_page(page, 1);
1461 return ERR_PTR(err);
1462 }
1463
1464 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1465 {
1466 return __get_node_page(sbi, nid, NULL, 0);
1467 }
1468
1469 struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1470 {
1471 struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1472 nid_t nid = get_nid(parent, start, false);
1473
1474 return __get_node_page(sbi, nid, parent, start);
1475 }
1476
1477 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1478 {
1479 struct inode *inode;
1480 struct page *page;
1481 int ret;
1482
1483 /* should flush inline_data before evict_inode */
1484 inode = ilookup(sbi->sb, ino);
1485 if (!inode)
1486 return;
1487
1488 page = f2fs_pagecache_get_page(inode->i_mapping, 0,
1489 FGP_LOCK|FGP_NOWAIT, 0);
1490 if (!page)
1491 goto iput_out;
1492
1493 if (!PageUptodate(page))
1494 goto page_out;
1495
1496 if (!PageDirty(page))
1497 goto page_out;
1498
1499 if (!clear_page_dirty_for_io(page))
1500 goto page_out;
1501
1502 ret = f2fs_write_inline_data(inode, page);
1503 inode_dec_dirty_pages(inode);
1504 f2fs_remove_dirty_inode(inode);
1505 if (ret)
1506 set_page_dirty(page);
1507 page_out:
1508 f2fs_put_page(page, 1);
1509 iput_out:
1510 iput(inode);
1511 }
1512
1513 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1514 {
1515 pgoff_t index;
1516 struct pagevec pvec;
1517 struct page *last_page = NULL;
1518 int nr_pages;
1519
1520 pagevec_init(&pvec);
1521 index = 0;
1522
1523 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1524 PAGECACHE_TAG_DIRTY))) {
1525 int i;
1526
1527 for (i = 0; i < nr_pages; i++) {
1528 struct page *page = pvec.pages[i];
1529
1530 if (unlikely(f2fs_cp_error(sbi))) {
1531 f2fs_put_page(last_page, 0);
1532 pagevec_release(&pvec);
1533 return ERR_PTR(-EIO);
1534 }
1535
1536 if (!IS_DNODE(page) || !is_cold_node(page))
1537 continue;
1538 if (ino_of_node(page) != ino)
1539 continue;
1540
1541 lock_page(page);
1542
1543 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1544 continue_unlock:
1545 unlock_page(page);
1546 continue;
1547 }
1548 if (ino_of_node(page) != ino)
1549 goto continue_unlock;
1550
1551 if (!PageDirty(page)) {
1552 /* someone wrote it for us */
1553 goto continue_unlock;
1554 }
1555
1556 if (last_page)
1557 f2fs_put_page(last_page, 0);
1558
1559 get_page(page);
1560 last_page = page;
1561 unlock_page(page);
1562 }
1563 pagevec_release(&pvec);
1564 cond_resched();
1565 }
1566 return last_page;
1567 }
1568
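/*
 * Writes one node page under sbi->node_write. On success the page is
 * unlocked here; on redirty it is left locked and
 * AOP_WRITEPAGE_ACTIVATE is returned, per the ->writepage convention.
 */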
1569 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1570 struct writeback_control *wbc, bool do_balance,
1571 enum iostat_type io_type, unsigned int *seq_id)
1572 {
1573 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1574 nid_t nid;
1575 struct node_info ni;
1576 struct f2fs_io_info fio = {
1577 .sbi = sbi,
1578 .ino = ino_of_node(page),
1579 .type = NODE,
1580 .op = REQ_OP_WRITE,
1581 .op_flags = wbc_to_write_flags(wbc),
1582 .page = page,
1583 .encrypted_page = NULL,
1584 .submitted = false,
1585 .io_type = io_type,
1586 .io_wbc = wbc,
1587 };
1588 unsigned int seq;
1589
1590 trace_f2fs_writepage(page, NODE);
1591
1592 if (unlikely(f2fs_cp_error(sbi))) {
1593 ClearPageUptodate(page);
1594 dec_page_count(sbi, F2FS_DIRTY_NODES);
1595 unlock_page(page);
1596 return 0;
1597 }
1598
1599 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1600 goto redirty_out;
1601
1602 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
1603 wbc->sync_mode == WB_SYNC_NONE &&
1604 IS_DNODE(page) && is_cold_node(page))
1605 goto redirty_out;
1606
1607 /* get old block addr of this node page */
1608 nid = nid_of_node(page);
1609 f2fs_bug_on(sbi, page->index != nid);
1610
1611 if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
1612 goto redirty_out;
1613
1614 if (wbc->for_reclaim) {
1615 if (!f2fs_down_read_trylock(&sbi->node_write))
1616 goto redirty_out;
1617 } else {
1618 f2fs_down_read(&sbi->node_write);
1619 }
1620
1621 /* This page is already truncated */
1622 if (unlikely(ni.blk_addr == NULL_ADDR)) {
1623 ClearPageUptodate(page);
1624 dec_page_count(sbi, F2FS_DIRTY_NODES);
1625 f2fs_up_read(&sbi->node_write);
1626 unlock_page(page);
1627 return 0;
1628 }
1629
1630 if (__is_valid_data_blkaddr(ni.blk_addr) &&
1631 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
1632 DATA_GENERIC_ENHANCE)) {
1633 f2fs_up_read(&sbi->node_write);
1634 goto redirty_out;
1635 }
1636
1637 if (atomic && !test_opt(sbi, NOBARRIER) && !f2fs_sb_has_blkzoned(sbi))
1638 fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1639
1640 /* should add to global list before clearing PAGECACHE status */
1641 if (f2fs_in_warm_node_list(sbi, page)) {
1642 seq = f2fs_add_fsync_node_entry(sbi, page);
1643 if (seq_id)
1644 *seq_id = seq;
1645 }
1646
1647 set_page_writeback(page);
1648 ClearPageError(page);
1649
1650 fio.old_blkaddr = ni.blk_addr;
1651 f2fs_do_write_node_page(nid, &fio);
1652 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1653 dec_page_count(sbi, F2FS_DIRTY_NODES);
1654 f2fs_up_read(&sbi->node_write);
1655
1656 if (wbc->for_reclaim) {
1657 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
1658 submitted = NULL;
1659 }
1660
1661 unlock_page(page);
1662
1663 if (unlikely(f2fs_cp_error(sbi))) {
1664 f2fs_submit_merged_write(sbi, NODE);
1665 submitted = NULL;
1666 }
1667 if (submitted)
1668 *submitted = fio.submitted;
1669
1670 if (do_balance)
1671 f2fs_balance_fs(sbi, false);
1672 return 0;
1673
1674 redirty_out:
1675 redirty_page_for_writepage(wbc, page);
1676 return AOP_WRITEPAGE_ACTIVATE;
1677 }
1678
1679 int f2fs_move_node_page(struct page *node_page, int gc_type)
1680 {
1681 int err = 0;
1682
1683 if (gc_type == FG_GC) {
1684 struct writeback_control wbc = {
1685 .sync_mode = WB_SYNC_ALL,
1686 .nr_to_write = 1,
1687 .for_reclaim = 0,
1688 };
1689
1690 f2fs_wait_on_page_writeback(node_page, NODE, true, true);
1691
1692 set_page_dirty(node_page);
1693
1694 if (!clear_page_dirty_for_io(node_page)) {
1695 err = -EAGAIN;
1696 goto out_page;
1697 }
1698
1699 if (__write_node_page(node_page, false, NULL,
1700 &wbc, false, FS_GC_NODE_IO, NULL)) {
1701 err = -EAGAIN;
1702 unlock_page(node_page);
1703 }
1704 goto release_page;
1705 } else {
1706 /* set page dirty and write it */
1707 if (!PageWriteback(node_page))
1708 set_page_dirty(node_page);
1709 }
1710 out_page:
1711 unlock_page(node_page);
1712 release_page:
1713 f2fs_put_page(node_page, 0);
1714 return err;
1715 }
1716
1717 static int f2fs_write_node_page(struct page *page,
1718 struct writeback_control *wbc)
1719 {
1720 return __write_node_page(page, false, NULL, wbc, false,
1721 FS_NODE_IO, NULL);
1722 }
1723
1724 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1725 struct writeback_control *wbc, bool atomic,
1726 unsigned int *seq_id)
1727 {
1728 pgoff_t index;
1729 struct pagevec pvec;
1730 int ret = 0;
1731 struct page *last_page = NULL;
1732 bool marked = false;
1733 nid_t ino = inode->i_ino;
1734 int nr_pages;
1735 int nwritten = 0;
1736
1737 if (atomic) {
1738 last_page = last_fsync_dnode(sbi, ino);
1739 if (IS_ERR_OR_NULL(last_page))
1740 return PTR_ERR_OR_ZERO(last_page);
1741 }
1742 retry:
1743 pagevec_init(&pvec);
1744 index = 0;
1745
1746 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1747 PAGECACHE_TAG_DIRTY))) {
1748 int i;
1749
1750 for (i = 0; i < nr_pages; i++) {
1751 struct page *page = pvec.pages[i];
1752 bool submitted = false;
1753
1754 if (unlikely(f2fs_cp_error(sbi))) {
1755 f2fs_put_page(last_page, 0);
1756 pagevec_release(&pvec);
1757 ret = -EIO;
1758 goto out;
1759 }
1760
1761 if (!IS_DNODE(page) || !is_cold_node(page))
1762 continue;
1763 if (ino_of_node(page) != ino)
1764 continue;
1765
1766 lock_page(page);
1767
1768 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1769 continue_unlock:
1770 unlock_page(page);
1771 continue;
1772 }
1773 if (ino_of_node(page) != ino)
1774 goto continue_unlock;
1775
1776 if (!PageDirty(page) && page != last_page) {
1777 /* someone wrote it for us */
1778 goto continue_unlock;
1779 }
1780
1781 f2fs_wait_on_page_writeback(page, NODE, true, true);
1782
1783 set_fsync_mark(page, 0);
1784 set_dentry_mark(page, 0);
1785
1786 if (!atomic || page == last_page) {
1787 set_fsync_mark(page, 1);
1788 percpu_counter_inc(&sbi->rf_node_block_count);
1789 if (IS_INODE(page)) {
1790 if (is_inode_flag_set(inode,
1791 FI_DIRTY_INODE))
1792 f2fs_update_inode(inode, page);
1793 set_dentry_mark(page,
1794 f2fs_need_dentry_mark(sbi, ino));
1795 }
1796 /* may have been written by another thread */
1797 if (!PageDirty(page))
1798 set_page_dirty(page);
1799 }
1800
1801 if (!clear_page_dirty_for_io(page))
1802 goto continue_unlock;
1803
1804 ret = __write_node_page(page, atomic &&
1805 page == last_page,
1806 &submitted, wbc, true,
1807 FS_NODE_IO, seq_id);
1808 if (ret) {
1809 unlock_page(page);
1810 f2fs_put_page(last_page, 0);
1811 break;
1812 } else if (submitted) {
1813 nwritten++;
1814 }
1815
1816 if (page == last_page) {
1817 f2fs_put_page(page, 0);
1818 marked = true;
1819 break;
1820 }
1821 }
1822 pagevec_release(&pvec);
1823 cond_resched();
1824
1825 if (ret || marked)
1826 break;
1827 }
1828 if (!ret && atomic && !marked) {
1829 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1830 ino, last_page->index);
1831 lock_page(last_page);
1832 f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1833 set_page_dirty(last_page);
1834 unlock_page(last_page);
1835 goto retry;
1836 }
1837 out:
1838 if (nwritten)
1839 f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
1840 return ret ? -EIO : 0;
1841 }
1842
1843 static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1844 {
1845 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1846 bool clean;
1847
1848 if (inode->i_ino != ino)
1849 return 0;
1850
1851 if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1852 return 0;
1853
1854 spin_lock(&sbi->inode_lock[DIRTY_META]);
1855 clean = list_empty(&F2FS_I(inode)->gdirty_list);
1856 spin_unlock(&sbi->inode_lock[DIRTY_META]);
1857
1858 if (clean)
1859 return 0;
1860
1861 inode = igrab(inode);
1862 if (!inode)
1863 return 0;
1864 return 1;
1865 }
1866
1867 static bool flush_dirty_inode(struct page *page)
1868 {
1869 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1870 struct inode *inode;
1871 nid_t ino = ino_of_node(page);
1872
1873 inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1874 if (!inode)
1875 return false;
1876
1877 f2fs_update_inode(inode, page);
1878 unlock_page(page);
1879
1880 iput(inode);
1881 return true;
1882 }
1883
1884 void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
1885 {
1886 pgoff_t index = 0;
1887 struct pagevec pvec;
1888 int nr_pages;
1889
1890 pagevec_init(&pvec);
1891
1892 while ((nr_pages = pagevec_lookup_tag(&pvec,
1893 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1894 int i;
1895
1896 for (i = 0; i < nr_pages; i++) {
1897 struct page *page = pvec.pages[i];
1898
1899 if (!IS_DNODE(page))
1900 continue;
1901
1902 lock_page(page);
1903
1904 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1905 continue_unlock:
1906 unlock_page(page);
1907 continue;
1908 }
1909
1910 if (!PageDirty(page)) {
1911 /* someone wrote it for us */
1912 goto continue_unlock;
1913 }
1914
1915 /* flush inline_data, since we are in an async context */
1916 if (page_private_inline(page)) {
1917 clear_page_private_inline(page);
1918 unlock_page(page);
1919 flush_inline_data(sbi, ino_of_node(page));
1920 continue;
1921 }
1922 unlock_page(page);
1923 }
1924 pagevec_release(&pvec);
1925 cond_resched();
1926 }
1927 }
1928
1929 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1930 struct writeback_control *wbc,
1931 bool do_balance, enum iostat_type io_type)
1932 {
1933 pgoff_t index;
1934 struct pagevec pvec;
1935 int step = 0;
1936 int nwritten = 0;
1937 int ret = 0;
1938 int nr_pages, done = 0;
1939
1940 pagevec_init(&pvec);
1941
1942 next_step:
1943 index = 0;
1944
1945 while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1946 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1947 int i;
1948
1949 for (i = 0; i < nr_pages; i++) {
1950 struct page *page = pvec.pages[i];
1951 bool submitted = false;
1952 bool may_dirty = true;
1953
1954 /* give priority to WB_SYNC threads */
1955 if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1956 wbc->sync_mode == WB_SYNC_NONE) {
1957 done = 1;
1958 break;
1959 }
1960
1961 /*
1962 * flushing sequence with step:
1963 * 0. indirect nodes
1964 * 1. dentry dnodes
1965 * 2. file dnodes
1966 */
1967 if (step == 0 && IS_DNODE(page))
1968 continue;
1969 if (step == 1 && (!IS_DNODE(page) ||
1970 is_cold_node(page)))
1971 continue;
1972 if (step == 2 && (!IS_DNODE(page) ||
1973 !is_cold_node(page)))
1974 continue;
1975 lock_node:
1976 if (wbc->sync_mode == WB_SYNC_ALL)
1977 lock_page(page);
1978 else if (!trylock_page(page))
1979 continue;
1980
1981 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1982 continue_unlock:
1983 unlock_page(page);
1984 continue;
1985 }
1986
1987 if (!PageDirty(page)) {
1988 /* someone wrote it for us */
1989 goto continue_unlock;
1990 }
1991
1992 /* flush inline_data/inode only in an async context (do_balance set) */
1993 if (!do_balance)
1994 goto write_node;
1995
1996 /* flush inline_data */
1997 if (page_private_inline(page)) {
1998 clear_page_private_inline(page);
1999 unlock_page(page);
2000 flush_inline_data(sbi, ino_of_node(page));
2001 goto lock_node;
2002 }
2003
2004 /* flush dirty inode */
2005 if (IS_INODE(page) && may_dirty) {
2006 may_dirty = false;
2007 if (flush_dirty_inode(page))
2008 goto lock_node;
2009 }
2010 write_node:
2011 f2fs_wait_on_page_writeback(page, NODE, true, true);
2012
2013 if (!clear_page_dirty_for_io(page))
2014 goto continue_unlock;
2015
2016 set_fsync_mark(page, 0);
2017 set_dentry_mark(page, 0);
2018
2019 ret = __write_node_page(page, false, &submitted,
2020 wbc, do_balance, io_type, NULL);
2021 if (ret)
2022 unlock_page(page);
2023 else if (submitted)
2024 nwritten++;
2025
2026 if (--wbc->nr_to_write == 0)
2027 break;
2028 }
2029 pagevec_release(&pvec);
2030 cond_resched();
2031
2032 if (wbc->nr_to_write == 0) {
2033 step = 2;
2034 break;
2035 }
2036 }
2037
2038 if (step < 2) {
2039 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
2040 wbc->sync_mode == WB_SYNC_NONE && step == 1)
2041 goto out;
2042 step++;
2043 goto next_step;
2044 }
2045 out:
2046 if (nwritten)
2047 f2fs_submit_merged_write(sbi, NODE);
2048
2049 if (unlikely(f2fs_cp_error(sbi)))
2050 return -EIO;
2051 return ret;
2052 }
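
/*
 * Minimal usage sketch (illustrative, not compiled): this mirrors how
 * the checkpoint path drives a full synchronous flush of node pages.
 * The wbc values shown are assumptions about a typical caller, not a
 * contract of this function.
 */
#if 0
static int example_sync_all_node_pages(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,	/* wait, do not trylock-skip */
		.nr_to_write = LONG_MAX,	/* no page budget */
		.for_reclaim = 0,
	};

	return f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
}
#endif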
2053
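/*
 * Wait for writeback of the fsync-marked node pages whose sequence ids
 * are not larger than seq_id; returns -EIO if any of them, or the node
 * mapping itself, saw a write error.
 */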
2054 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
2055 unsigned int seq_id)
2056 {
2057 struct fsync_node_entry *fn;
2058 struct page *page;
2059 struct list_head *head = &sbi->fsync_node_list;
2060 unsigned long flags;
2061 unsigned int cur_seq_id = 0;
2062 int ret2, ret = 0;
2063
2064 while (seq_id && cur_seq_id < seq_id) {
2065 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
2066 if (list_empty(head)) {
2067 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2068 break;
2069 }
2070 fn = list_first_entry(head, struct fsync_node_entry, list);
2071 if (fn->seq_id > seq_id) {
2072 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2073 break;
2074 }
2075 cur_seq_id = fn->seq_id;
2076 page = fn->page;
2077 get_page(page);
2078 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
2079
2080 f2fs_wait_on_page_writeback(page, NODE, true, false);
2081 if (TestClearPageError(page))
2082 ret = -EIO;
2083
2084 put_page(page);
2085
2086 if (ret)
2087 break;
2088 }
2089
2090 ret2 = filemap_check_errors(NODE_MAPPING(sbi));
2091 if (!ret)
2092 ret = ret2;
2093
2094 return ret;
2095 }
2096
2097 static int f2fs_write_node_pages(struct address_space *mapping,
2098 struct writeback_control *wbc)
2099 {
2100 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2101 struct blk_plug plug;
2102 long diff;
2103
2104 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
2105 goto skip_write;
2106
2107 /* balancing f2fs's metadata in background */
2108 f2fs_balance_fs_bg(sbi, true);
2109
2110 /* collect a number of dirty node pages and write them together */
2111 if (wbc->sync_mode != WB_SYNC_ALL &&
2112 get_pages(sbi, F2FS_DIRTY_NODES) <
2113 nr_pages_to_skip(sbi, NODE))
2114 goto skip_write;
2115
2116 if (wbc->sync_mode == WB_SYNC_ALL)
2117 atomic_inc(&sbi->wb_sync_req[NODE]);
2118 else if (atomic_read(&sbi->wb_sync_req[NODE])) {
2119 /* to avoid potential deadlock */
2120 if (current->plug)
2121 blk_finish_plug(current->plug);
2122 goto skip_write;
2123 }
2124
2125 trace_f2fs_writepages(mapping->host, wbc, NODE);
2126
2127 diff = nr_pages_to_write(sbi, NODE, wbc);
2128 blk_start_plug(&plug);
2129 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
2130 blk_finish_plug(&plug);
2131 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
2132
2133 if (wbc->sync_mode == WB_SYNC_ALL)
2134 atomic_dec(&sbi->wb_sync_req[NODE]);
2135 return 0;
2136
2137 skip_write:
2138 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
2139 trace_f2fs_writepages(mapping->host, wbc, NODE);
2140 return 0;
2141 }
2142
2143 static bool f2fs_dirty_node_folio(struct address_space *mapping,
2144 struct folio *folio)
2145 {
2146 trace_f2fs_set_page_dirty(&folio->page, NODE);
2147
2148 if (!folio_test_uptodate(folio))
2149 folio_mark_uptodate(folio);
2150 #ifdef CONFIG_F2FS_CHECK_FS
2151 if (IS_INODE(&folio->page))
2152 f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
2153 #endif
2154 if (!folio_test_dirty(folio)) {
2155 filemap_dirty_folio(mapping, folio);
2156 inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2157 set_page_private_reference(&folio->page);
2158 return true;
2159 }
2160 return false;
2161 }
2162
2163 /*
2164 * Structure of the f2fs node operations
2165 */
2166 const struct address_space_operations f2fs_node_aops = {
2167 .writepage = f2fs_write_node_page,
2168 .writepages = f2fs_write_node_pages,
2169 .dirty_folio = f2fs_dirty_node_folio,
2170 .invalidate_folio = f2fs_invalidate_folio,
2171 .release_folio = f2fs_release_folio,
2172 #ifdef CONFIG_MIGRATION
2173 .migratepage = f2fs_migrate_page,
2174 #endif
2175 };
2176
2177 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
2178 nid_t n)
2179 {
2180 return radix_tree_lookup(&nm_i->free_nid_root, n);
2181 }
2182
2183 static int __insert_free_nid(struct f2fs_sb_info *sbi,
2184 struct free_nid *i)
2185 {
2186 struct f2fs_nm_info *nm_i = NM_I(sbi);
2187 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
2188
2189 if (err)
2190 return err;
2191
2192 nm_i->nid_cnt[FREE_NID]++;
2193 list_add_tail(&i->list, &nm_i->free_nid_list);
2194 return 0;
2195 }
2196
2197 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2198 struct free_nid *i, enum nid_state state)
2199 {
2200 struct f2fs_nm_info *nm_i = NM_I(sbi);
2201
2202 f2fs_bug_on(sbi, state != i->state);
2203 nm_i->nid_cnt[state]--;
2204 if (state == FREE_NID)
2205 list_del(&i->list);
2206 radix_tree_delete(&nm_i->free_nid_root, i->nid);
2207 }
2208
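/*
 * Move a cached nid between the FREE_NID and PREALLOC_NID states,
 * keeping the per-state counters and the free_nid_list in sync.
 */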
2209 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2210 enum nid_state org_state, enum nid_state dst_state)
2211 {
2212 struct f2fs_nm_info *nm_i = NM_I(sbi);
2213
2214 f2fs_bug_on(sbi, org_state != i->state);
2215 i->state = dst_state;
2216 nm_i->nid_cnt[org_state]--;
2217 nm_i->nid_cnt[dst_state]++;
2218
2219 switch (dst_state) {
2220 case PREALLOC_NID:
2221 list_del(&i->list);
2222 break;
2223 case FREE_NID:
2224 list_add_tail(&i->list, &nm_i->free_nid_list);
2225 break;
2226 default:
2227 BUG_ON(1);
2228 }
2229 }
2230
2231 bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
2232 {
2233 struct f2fs_nm_info *nm_i = NM_I(sbi);
2234 unsigned int i;
2235 bool ret = true;
2236
2237 f2fs_down_read(&nm_i->nat_tree_lock);
2238 for (i = 0; i < nm_i->nat_blocks; i++) {
2239 if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
2240 ret = false;
2241 break;
2242 }
2243 }
2244 f2fs_up_read(&nm_i->nat_tree_lock);
2245
2246 return ret;
2247 }
2248
2249 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2250 bool set, bool build)
2251 {
2252 struct f2fs_nm_info *nm_i = NM_I(sbi);
2253 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2254 unsigned int nid_ofs = nid - START_NID(nid);
2255
2256 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2257 return;
2258
2259 if (set) {
2260 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2261 return;
2262 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2263 nm_i->free_nid_count[nat_ofs]++;
2264 } else {
2265 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2266 return;
2267 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2268 if (!build)
2269 nm_i->free_nid_count[nat_ofs]--;
2270 }
2271 }
2272
2273 /* return whether the nid is recognized as free */
2274 static bool add_free_nid(struct f2fs_sb_info *sbi,
2275 nid_t nid, bool build, bool update)
2276 {
2277 struct f2fs_nm_info *nm_i = NM_I(sbi);
2278 struct free_nid *i, *e;
2279 struct nat_entry *ne;
2280 int err = -EINVAL;
2281 bool ret = false;
2282
2283 /* 0 nid should not be used */
2284 if (unlikely(nid == 0))
2285 return false;
2286
2287 if (unlikely(f2fs_check_nid_range(sbi, nid)))
2288 return false;
2289
2290 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
2291 i->nid = nid;
2292 i->state = FREE_NID;
2293
2294 radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2295
2296 spin_lock(&nm_i->nid_list_lock);
2297
2298 if (build) {
2299 /*
2300 * Thread A Thread B
2301 * - f2fs_create
2302 * - f2fs_new_inode
2303 * - f2fs_alloc_nid
2304 * - __insert_nid_to_list(PREALLOC_NID)
2305 * - f2fs_balance_fs_bg
2306 * - f2fs_build_free_nids
2307 * - __f2fs_build_free_nids
2308 * - scan_nat_page
2309 * - add_free_nid
2310 * - __lookup_nat_cache
2311 * - f2fs_add_link
2312 * - f2fs_init_inode_metadata
2313 * - f2fs_new_inode_page
2314 * - f2fs_new_node_page
2315 * - set_node_addr
2316 * - f2fs_alloc_nid_done
2317 * - __remove_nid_from_list(PREALLOC_NID)
2318 * - __insert_nid_to_list(FREE_NID)
2319 */
2320 ne = __lookup_nat_cache(nm_i, nid);
2321 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2322 nat_get_blkaddr(ne) != NULL_ADDR))
2323 goto err_out;
2324
2325 e = __lookup_free_nid_list(nm_i, nid);
2326 if (e) {
2327 if (e->state == FREE_NID)
2328 ret = true;
2329 goto err_out;
2330 }
2331 }
2332 ret = true;
2333 err = __insert_free_nid(sbi, i);
2334 err_out:
2335 if (update) {
2336 update_free_nid_bitmap(sbi, nid, ret, build);
2337 if (!build)
2338 nm_i->available_nids++;
2339 }
2340 spin_unlock(&nm_i->nid_list_lock);
2341 radix_tree_preload_end();
2342
2343 if (err)
2344 kmem_cache_free(free_nid_slab, i);
2345 return ret;
2346 }
2347
2348 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2349 {
2350 struct f2fs_nm_info *nm_i = NM_I(sbi);
2351 struct free_nid *i;
2352 bool need_free = false;
2353
2354 spin_lock(&nm_i->nid_list_lock);
2355 i = __lookup_free_nid_list(nm_i, nid);
2356 if (i && i->state == FREE_NID) {
2357 __remove_free_nid(sbi, i, FREE_NID);
2358 need_free = true;
2359 }
2360 spin_unlock(&nm_i->nid_list_lock);
2361
2362 if (need_free)
2363 kmem_cache_free(free_nid_slab, i);
2364 }
2365
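/*
 * Scan one on-disk NAT block: nids whose block address is NULL_ADDR are
 * cached as free; a NEW_ADDR here means the NAT is corrupted, so bail
 * out with -EINVAL and let fsck deal with it.
 */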
2366 static int scan_nat_page(struct f2fs_sb_info *sbi,
2367 struct page *nat_page, nid_t start_nid)
2368 {
2369 struct f2fs_nm_info *nm_i = NM_I(sbi);
2370 struct f2fs_nat_block *nat_blk = page_address(nat_page);
2371 block_t blk_addr;
2372 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2373 int i;
2374
2375 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2376
2377 i = start_nid % NAT_ENTRY_PER_BLOCK;
2378
2379 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2380 if (unlikely(start_nid >= nm_i->max_nid))
2381 break;
2382
2383 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2384
2385 if (blk_addr == NEW_ADDR)
2386 return -EINVAL;
2387
2388 if (blk_addr == NULL_ADDR) {
2389 add_free_nid(sbi, start_nid, true, true);
2390 } else {
2391 spin_lock(&NM_I(sbi)->nid_list_lock);
2392 update_free_nid_bitmap(sbi, start_nid, false, true);
2393 spin_unlock(&NM_I(sbi)->nid_list_lock);
2394 }
2395 }
2396
2397 return 0;
2398 }
2399
2400 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2401 {
2402 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2403 struct f2fs_journal *journal = curseg->journal;
2404 int i;
2405
2406 down_read(&curseg->journal_rwsem);
2407 for (i = 0; i < nats_in_cursum(journal); i++) {
2408 block_t addr;
2409 nid_t nid;
2410
2411 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2412 nid = le32_to_cpu(nid_in_journal(journal, i));
2413 if (addr == NULL_ADDR)
2414 add_free_nid(sbi, nid, true, false);
2415 else
2416 remove_free_nid(sbi, nid);
2417 }
2418 up_read(&curseg->journal_rwsem);
2419 }
2420
2421 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2422 {
2423 struct f2fs_nm_info *nm_i = NM_I(sbi);
2424 unsigned int i, idx;
2425 nid_t nid;
2426
2427 f2fs_down_read(&nm_i->nat_tree_lock);
2428
2429 for (i = 0; i < nm_i->nat_blocks; i++) {
2430 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2431 continue;
2432 if (!nm_i->free_nid_count[i])
2433 continue;
2434 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2435 idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2436 NAT_ENTRY_PER_BLOCK, idx);
2437 if (idx >= NAT_ENTRY_PER_BLOCK)
2438 break;
2439
2440 nid = i * NAT_ENTRY_PER_BLOCK + idx;
2441 add_free_nid(sbi, nid, true, false);
2442
2443 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2444 goto out;
2445 }
2446 }
2447 out:
2448 scan_curseg_cache(sbi);
2449
2450 f2fs_up_read(&nm_i->nat_tree_lock);
2451 }
2452
2453 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2454 bool sync, bool mount)
2455 {
2456 struct f2fs_nm_info *nm_i = NM_I(sbi);
2457 int i = 0, ret;
2458 nid_t nid = nm_i->next_scan_nid;
2459
2460 if (unlikely(nid >= nm_i->max_nid))
2461 nid = 0;
2462
2463 if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
2464 nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;
2465
2466 /* Enough entries */
2467 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2468 return 0;
2469
2470 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2471 return 0;
2472
2473 if (!mount) {
2474 /* try to find free nids in free_nid_bitmap */
2475 scan_free_nid_bits(sbi);
2476
2477 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2478 return 0;
2479 }
2480
2481 /* readahead nat pages to be scanned */
2482 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2483 META_NAT, true);
2484
2485 f2fs_down_read(&nm_i->nat_tree_lock);
2486
2487 while (1) {
2488 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2489 nm_i->nat_block_bitmap)) {
2490 struct page *page = get_current_nat_page(sbi, nid);
2491
2492 if (IS_ERR(page)) {
2493 ret = PTR_ERR(page);
2494 } else {
2495 ret = scan_nat_page(sbi, page, nid);
2496 f2fs_put_page(page, 1);
2497 }
2498
2499 if (ret) {
2500 f2fs_up_read(&nm_i->nat_tree_lock);
2501 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2502 return ret;
2503 }
2504 }
2505
2506 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2507 if (unlikely(nid >= nm_i->max_nid))
2508 nid = 0;
2509
2510 if (++i >= FREE_NID_PAGES)
2511 break;
2512 }
2513
2514 /* continue the scan from the next nat pages to find more free nids */
2515 nm_i->next_scan_nid = nid;
2516
2517 /* find free nids from current sum_pages */
2518 scan_curseg_cache(sbi);
2519
2520 f2fs_up_read(&nm_i->nat_tree_lock);
2521
2522 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2523 nm_i->ra_nid_pages, META_NAT, false);
2524
2525 return 0;
2526 }
2527
2528 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2529 {
2530 int ret;
2531
2532 mutex_lock(&NM_I(sbi)->build_lock);
2533 ret = __f2fs_build_free_nids(sbi, sync, mount);
2534 mutex_unlock(&NM_I(sbi)->build_lock);
2535
2536 return ret;
2537 }
2538
2539 /*
2540 * If this function returns success, the caller can obtain a new nid
2541 * from the second parameter of this function.
2542 * The returned nid can be used as an ino as well as a nid when the inode is created.
2543 */
2544 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2545 {
2546 struct f2fs_nm_info *nm_i = NM_I(sbi);
2547 struct free_nid *i = NULL;
2548 retry:
2549 if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2550 f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
2551 return false;
2552 }
2553
2554 spin_lock(&nm_i->nid_list_lock);
2555
2556 if (unlikely(nm_i->available_nids == 0)) {
2557 spin_unlock(&nm_i->nid_list_lock);
2558 return false;
2559 }
2560
2561 /* We should not use stale free nids created by f2fs_build_free_nids */
2562 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2563 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2564 i = list_first_entry(&nm_i->free_nid_list,
2565 struct free_nid, list);
2566 *nid = i->nid;
2567
2568 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2569 nm_i->available_nids--;
2570
2571 update_free_nid_bitmap(sbi, *nid, false, false);
2572
2573 spin_unlock(&nm_i->nid_list_lock);
2574 return true;
2575 }
2576 spin_unlock(&nm_i->nid_list_lock);
2577
2578 /* Let's scan nat pages and their caches to get free nids */
2579 if (!f2fs_build_free_nids(sbi, true, false))
2580 goto retry;
2581 return false;
2582 }
2583
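/*
 * Illustrative pairing of the nid allocation API (sketch only, not
 * compiled): a reserved nid must be finalized with f2fs_alloc_nid_done()
 * or returned with f2fs_alloc_nid_failed(). example_use_nid() is a
 * hypothetical consumer, not a real helper in this file.
 */
#if 0
static int example_reserve_nid(struct f2fs_sb_info *sbi)
{
	nid_t nid;

	if (!f2fs_alloc_nid(sbi, &nid))
		return -ENOSPC;

	if (example_use_nid(sbi, nid)) {
		/* give the nid back to the free list */
		f2fs_alloc_nid_failed(sbi, nid);
		return -EIO;
	}

	/* drop the PREALLOC_NID tracking entry */
	f2fs_alloc_nid_done(sbi, nid);
	return 0;
}
#endif
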
2584 /*
2585 * f2fs_alloc_nid() should be called prior to this function.
2586 */
2587 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2588 {
2589 struct f2fs_nm_info *nm_i = NM_I(sbi);
2590 struct free_nid *i;
2591
2592 spin_lock(&nm_i->nid_list_lock);
2593 i = __lookup_free_nid_list(nm_i, nid);
2594 f2fs_bug_on(sbi, !i);
2595 __remove_free_nid(sbi, i, PREALLOC_NID);
2596 spin_unlock(&nm_i->nid_list_lock);
2597
2598 kmem_cache_free(free_nid_slab, i);
2599 }
2600
2601 /*
2602 * f2fs_alloc_nid() should be called prior to this function.
2603 */
2604 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2605 {
2606 struct f2fs_nm_info *nm_i = NM_I(sbi);
2607 struct free_nid *i;
2608 bool need_free = false;
2609
2610 if (!nid)
2611 return;
2612
2613 spin_lock(&nm_i->nid_list_lock);
2614 i = __lookup_free_nid_list(nm_i, nid);
2615 f2fs_bug_on(sbi, !i);
2616
2617 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2618 __remove_free_nid(sbi, i, PREALLOC_NID);
2619 need_free = true;
2620 } else {
2621 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2622 }
2623
2624 nm_i->available_nids++;
2625
2626 update_free_nid_bitmap(sbi, nid, true, false);
2627
2628 spin_unlock(&nm_i->nid_list_lock);
2629
2630 if (need_free)
2631 kmem_cache_free(free_nid_slab, i);
2632 }
2633
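/*
 * Shrinker helper: drop up to nr_shrink cached free nids, in batches of
 * SHRINK_NID_BATCH_SIZE, while more than MAX_FREE_NIDS are cached.
 * Returns the number of nids actually released.
 */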
2634 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2635 {
2636 struct f2fs_nm_info *nm_i = NM_I(sbi);
2637 int nr = nr_shrink;
2638
2639 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2640 return 0;
2641
2642 if (!mutex_trylock(&nm_i->build_lock))
2643 return 0;
2644
2645 while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2646 struct free_nid *i, *next;
2647 unsigned int batch = SHRINK_NID_BATCH_SIZE;
2648
2649 spin_lock(&nm_i->nid_list_lock);
2650 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2651 if (!nr_shrink || !batch ||
2652 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2653 break;
2654 __remove_free_nid(sbi, i, FREE_NID);
2655 kmem_cache_free(free_nid_slab, i);
2656 nr_shrink--;
2657 batch--;
2658 }
2659 spin_unlock(&nm_i->nid_list_lock);
2660 }
2661
2662 mutex_unlock(&nm_i->build_lock);
2663
2664 return nr - nr_shrink;
2665 }
2666
2667 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2668 {
2669 void *src_addr, *dst_addr;
2670 size_t inline_size;
2671 struct page *ipage;
2672 struct f2fs_inode *ri;
2673
2674 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2675 if (IS_ERR(ipage))
2676 return PTR_ERR(ipage);
2677
2678 ri = F2FS_INODE(page);
2679 if (ri->i_inline & F2FS_INLINE_XATTR) {
2680 if (!f2fs_has_inline_xattr(inode)) {
2681 set_inode_flag(inode, FI_INLINE_XATTR);
2682 stat_inc_inline_xattr(inode);
2683 }
2684 } else {
2685 if (f2fs_has_inline_xattr(inode)) {
2686 stat_dec_inline_xattr(inode);
2687 clear_inode_flag(inode, FI_INLINE_XATTR);
2688 }
2689 goto update_inode;
2690 }
2691
2692 dst_addr = inline_xattr_addr(inode, ipage);
2693 src_addr = inline_xattr_addr(inode, page);
2694 inline_size = inline_xattr_size(inode);
2695
2696 f2fs_wait_on_page_writeback(ipage, NODE, true, true);
2697 memcpy(dst_addr, src_addr, inline_size);
2698 update_inode:
2699 f2fs_update_inode(inode, ipage);
2700 f2fs_put_page(ipage, 1);
2701 return 0;
2702 }
2703
2704 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2705 {
2706 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2707 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2708 nid_t new_xnid;
2709 struct dnode_of_data dn;
2710 struct node_info ni;
2711 struct page *xpage;
2712 int err;
2713
2714 if (!prev_xnid)
2715 goto recover_xnid;
2716
2717 /* 1: invalidate the previous xattr nid */
2718 err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
2719 if (err)
2720 return err;
2721
2722 f2fs_invalidate_blocks(sbi, ni.blk_addr);
2723 dec_valid_node_count(sbi, inode, false);
2724 set_node_addr(sbi, &ni, NULL_ADDR, false);
2725
2726 recover_xnid:
2727 /* 2: update xattr nid in inode */
2728 if (!f2fs_alloc_nid(sbi, &new_xnid))
2729 return -ENOSPC;
2730
2731 set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2732 xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2733 if (IS_ERR(xpage)) {
2734 f2fs_alloc_nid_failed(sbi, new_xnid);
2735 return PTR_ERR(xpage);
2736 }
2737
2738 f2fs_alloc_nid_done(sbi, new_xnid);
2739 f2fs_update_inode_page(inode);
2740
2741 /* 3: update and set xattr node page dirty */
2742 memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2743
2744 set_page_dirty(xpage);
2745 f2fs_put_page(xpage, 1);
2746
2747 return 0;
2748 }
2749
2750 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2751 {
2752 struct f2fs_inode *src, *dst;
2753 nid_t ino = ino_of_node(page);
2754 struct node_info old_ni, new_ni;
2755 struct page *ipage;
2756 int err;
2757
2758 err = f2fs_get_node_info(sbi, ino, &old_ni, false);
2759 if (err)
2760 return err;
2761
2762 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2763 return -EINVAL;
2764 retry:
2765 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2766 if (!ipage) {
2767 memalloc_retry_wait(GFP_NOFS);
2768 goto retry;
2769 }
2770
2771 /* Should not use this inode from free nid list */
2772 remove_free_nid(sbi, ino);
2773
2774 if (!PageUptodate(ipage))
2775 SetPageUptodate(ipage);
2776 fill_node_footer(ipage, ino, ino, 0, true);
2777 set_cold_node(ipage, false);
2778
2779 src = F2FS_INODE(page);
2780 dst = F2FS_INODE(ipage);
2781
2782 memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
2783 dst->i_size = 0;
2784 dst->i_blocks = cpu_to_le64(1);
2785 dst->i_links = cpu_to_le32(1);
2786 dst->i_xattr_nid = 0;
2787 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2788 if (dst->i_inline & F2FS_EXTRA_ATTR) {
2789 dst->i_extra_isize = src->i_extra_isize;
2790
2791 if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
2792 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2793 i_inline_xattr_size))
2794 dst->i_inline_xattr_size = src->i_inline_xattr_size;
2795
2796 if (f2fs_sb_has_project_quota(sbi) &&
2797 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2798 i_projid))
2799 dst->i_projid = src->i_projid;
2800
2801 if (f2fs_sb_has_inode_crtime(sbi) &&
2802 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2803 i_crtime_nsec)) {
2804 dst->i_crtime = src->i_crtime;
2805 dst->i_crtime_nsec = src->i_crtime_nsec;
2806 }
2807 }
2808
2809 new_ni = old_ni;
2810 new_ni.ino = ino;
2811
2812 if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2813 WARN_ON(1);
2814 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2815 inc_valid_inode_count(sbi);
2816 set_page_dirty(ipage);
2817 f2fs_put_page(ipage, 1);
2818 return 0;
2819 }
2820
2821 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2822 unsigned int segno, struct f2fs_summary_block *sum)
2823 {
2824 struct f2fs_node *rn;
2825 struct f2fs_summary *sum_entry;
2826 block_t addr;
2827 int i, idx, last_offset, nrpages;
2828
2829 /* scan the node segment */
2830 last_offset = sbi->blocks_per_seg;
2831 addr = START_BLOCK(sbi, segno);
2832 sum_entry = &sum->entries[0];
2833
2834 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2835 nrpages = bio_max_segs(last_offset - i);
2836
2837 /* readahead node pages */
2838 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2839
2840 for (idx = addr; idx < addr + nrpages; idx++) {
2841 struct page *page = f2fs_get_tmp_page(sbi, idx);
2842
2843 if (IS_ERR(page))
2844 return PTR_ERR(page);
2845
2846 rn = F2FS_NODE(page);
2847 sum_entry->nid = rn->footer.nid;
2848 sum_entry->version = 0;
2849 sum_entry->ofs_in_node = 0;
2850 sum_entry++;
2851 f2fs_put_page(page, 1);
2852 }
2853
2854 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2855 addr + nrpages);
2856 }
2857 return 0;
2858 }
2859
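/*
 * Drain all NAT entries from the current hot-data summary journal into
 * the in-memory NAT cache, marking them dirty so they get written out
 * through the regular NAT flush path.
 */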
2860 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2861 {
2862 struct f2fs_nm_info *nm_i = NM_I(sbi);
2863 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2864 struct f2fs_journal *journal = curseg->journal;
2865 int i;
2866
2867 down_write(&curseg->journal_rwsem);
2868 for (i = 0; i < nats_in_cursum(journal); i++) {
2869 struct nat_entry *ne;
2870 struct f2fs_nat_entry raw_ne;
2871 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2872
2873 if (f2fs_check_nid_range(sbi, nid))
2874 continue;
2875
2876 raw_ne = nat_in_journal(journal, i);
2877
2878 ne = __lookup_nat_cache(nm_i, nid);
2879 if (!ne) {
2880 ne = __alloc_nat_entry(sbi, nid, true);
2881 __init_nat_entry(nm_i, ne, &raw_ne, true);
2882 }
2883
2884 /*
2885 * if a free nat in the journal has not been used since the last
2886 * checkpoint, we should remove it from the available nids,
2887 * since we will add it again later.
2888 */
2889 if (!get_nat_flag(ne, IS_DIRTY) &&
2890 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2891 spin_lock(&nm_i->nid_list_lock);
2892 nm_i->available_nids--;
2893 spin_unlock(&nm_i->nid_list_lock);
2894 }
2895
2896 __set_nat_cache_dirty(nm_i, ne);
2897 }
2898 update_nats_in_cursum(journal, -i);
2899 up_write(&curseg->journal_rwsem);
2900 }
2901
2902 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2903 struct list_head *head, int max)
2904 {
2905 struct nat_entry_set *cur;
2906
2907 if (nes->entry_cnt >= max)
2908 goto add_out;
2909
2910 list_for_each_entry(cur, head, set_list) {
2911 if (cur->entry_cnt >= nes->entry_cnt) {
2912 list_add(&nes->set_list, cur->set_list.prev);
2913 return;
2914 }
2915 }
2916 add_out:
2917 list_add_tail(&nes->set_list, head);
2918 }
2919
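/*
 * Keep the empty/full NAT block bitmaps consistent: a block with no
 * valid entries is marked empty, a fully valid one is marked full, and
 * a partially valid block is cleared in both bitmaps.
 */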
2920 static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
2921 unsigned int valid)
2922 {
2923 if (valid == 0) {
2924 __set_bit_le(nat_ofs, nm_i->empty_nat_bits);
2925 __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2926 return;
2927 }
2928
2929 __clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
2930 if (valid == NAT_ENTRY_PER_BLOCK)
2931 __set_bit_le(nat_ofs, nm_i->full_nat_bits);
2932 else
2933 __clear_bit_le(nat_ofs, nm_i->full_nat_bits);
2934 }
2935
2936 static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2937 struct page *page)
2938 {
2939 struct f2fs_nm_info *nm_i = NM_I(sbi);
2940 unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2941 struct f2fs_nat_block *nat_blk = page_address(page);
2942 int valid = 0;
2943 int i = 0;
2944
2945 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
2946 return;
2947
2948 if (nat_index == 0) {
2949 valid = 1;
2950 i = 1;
2951 }
2952 for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2953 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2954 valid++;
2955 }
2956
2957 __update_nat_bits(nm_i, nat_index, valid);
2958 }
2959
2960 void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
2961 {
2962 struct f2fs_nm_info *nm_i = NM_I(sbi);
2963 unsigned int nat_ofs;
2964
2965 f2fs_down_read(&nm_i->nat_tree_lock);
2966
2967 for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
2968 unsigned int valid = 0, nid_ofs = 0;
2969
2970 /* handle nid zero, since it should never be used */
2971 if (unlikely(nat_ofs == 0)) {
2972 valid = 1;
2973 nid_ofs = 1;
2974 }
2975
2976 for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
2977 if (!test_bit_le(nid_ofs,
2978 nm_i->free_nid_bitmap[nat_ofs]))
2979 valid++;
2980 }
2981
2982 __update_nat_bits(nm_i, nat_ofs, valid);
2983 }
2984
2985 f2fs_up_read(&nm_i->nat_tree_lock);
2986 }
2987
2988 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2989 struct nat_entry_set *set, struct cp_control *cpc)
2990 {
2991 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2992 struct f2fs_journal *journal = curseg->journal;
2993 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2994 bool to_journal = true;
2995 struct f2fs_nat_block *nat_blk;
2996 struct nat_entry *ne, *cur;
2997 struct page *page = NULL;
2998
2999 /*
3000 * there are two steps to flush nat entries:
3001 * #1, flush nat entries to journal in current hot data summary block.
3002 * #2, flush nat entries to nat page.
3003 */
3004 if ((cpc->reason & CP_UMOUNT) ||
3005 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
3006 to_journal = false;
3007
3008 if (to_journal) {
3009 down_write(&curseg->journal_rwsem);
3010 } else {
3011 page = get_next_nat_page(sbi, start_nid);
3012 if (IS_ERR(page))
3013 return PTR_ERR(page);
3014
3015 nat_blk = page_address(page);
3016 f2fs_bug_on(sbi, !nat_blk);
3017 }
3018
3019 /* flush dirty nats in nat entry set */
3020 list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
3021 struct f2fs_nat_entry *raw_ne;
3022 nid_t nid = nat_get_nid(ne);
3023 int offset;
3024
3025 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
3026
3027 if (to_journal) {
3028 offset = f2fs_lookup_journal_in_cursum(journal,
3029 NAT_JOURNAL, nid, 1);
3030 f2fs_bug_on(sbi, offset < 0);
3031 raw_ne = &nat_in_journal(journal, offset);
3032 nid_in_journal(journal, offset) = cpu_to_le32(nid);
3033 } else {
3034 raw_ne = &nat_blk->entries[nid - start_nid];
3035 }
3036 raw_nat_from_node_info(raw_ne, &ne->ni);
3037 nat_reset_flag(ne);
3038 __clear_nat_cache_dirty(NM_I(sbi), set, ne);
3039 if (nat_get_blkaddr(ne) == NULL_ADDR) {
3040 add_free_nid(sbi, nid, false, true);
3041 } else {
3042 spin_lock(&NM_I(sbi)->nid_list_lock);
3043 update_free_nid_bitmap(sbi, nid, false, false);
3044 spin_unlock(&NM_I(sbi)->nid_list_lock);
3045 }
3046 }
3047
3048 if (to_journal) {
3049 up_write(&curseg->journal_rwsem);
3050 } else {
3051 update_nat_bits(sbi, start_nid, page);
3052 f2fs_put_page(page, 1);
3053 }
3054
3055 /* Allow dirty nats by node block allocation in write_begin */
3056 if (!set->entry_cnt) {
3057 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
3058 kmem_cache_free(nat_entry_set_slab, set);
3059 }
3060 return 0;
3061 }
3062
3063 /*
3064 * This function is called during the checkpointing process.
3065 */
3066 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3067 {
3068 struct f2fs_nm_info *nm_i = NM_I(sbi);
3069 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
3070 struct f2fs_journal *journal = curseg->journal;
3071 struct nat_entry_set *setvec[SETVEC_SIZE];
3072 struct nat_entry_set *set, *tmp;
3073 unsigned int found;
3074 nid_t set_idx = 0;
3075 LIST_HEAD(sets);
3076 int err = 0;
3077
3078 /*
3079 * during unmount, let's flush nat_bits before checking
3080 * nat_cnt[DIRTY_NAT].
3081 */
3082 if (cpc->reason & CP_UMOUNT) {
3083 f2fs_down_write(&nm_i->nat_tree_lock);
3084 remove_nats_in_journal(sbi);
3085 f2fs_up_write(&nm_i->nat_tree_lock);
3086 }
3087
3088 if (!nm_i->nat_cnt[DIRTY_NAT])
3089 return 0;
3090
3091 f2fs_down_write(&nm_i->nat_tree_lock);
3092
3093 /*
3094 * if there is not enough space in the journal to store dirty nat
3095 * entries, remove all entries from the journal and merge them
3096 * into the nat entry set.
3097 */
3098 if (cpc->reason & CP_UMOUNT ||
3099 !__has_cursum_space(journal,
3100 nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
3101 remove_nats_in_journal(sbi);
3102
3103 while ((found = __gang_lookup_nat_set(nm_i,
3104 set_idx, SETVEC_SIZE, setvec))) {
3105 unsigned idx;
3106
3107 set_idx = setvec[found - 1]->set + 1;
3108 for (idx = 0; idx < found; idx++)
3109 __adjust_nat_entry_set(setvec[idx], &sets,
3110 MAX_NAT_JENTRIES(journal));
3111 }
3112
3113 /* flush dirty nats in nat entry set */
3114 list_for_each_entry_safe(set, tmp, &sets, set_list) {
3115 err = __flush_nat_entry_set(sbi, set, cpc);
3116 if (err)
3117 break;
3118 }
3119
3120 f2fs_up_write(&nm_i->nat_tree_lock);
3121 /* Allow dirty nats by node block allocation in write_begin */
3122
3123 return err;
3124 }
3125
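/*
 * Load the nat_bits area from the checkpoint: an 8-byte version header
 * followed by the full-NAT bitmap and then the empty-NAT bitmap. If the
 * stored version does not match the current checkpoint, nat_bits is
 * disabled rather than trusted.
 */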
3126 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
3127 {
3128 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3129 struct f2fs_nm_info *nm_i = NM_I(sbi);
3130 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
3131 unsigned int i;
3132 __u64 cp_ver = cur_cp_version(ckpt);
3133 block_t nat_bits_addr;
3134
3135 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
3136 nm_i->nat_bits = f2fs_kvzalloc(sbi,
3137 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
3138 if (!nm_i->nat_bits)
3139 return -ENOMEM;
3140
3141 nm_i->full_nat_bits = nm_i->nat_bits + 8;
3142 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
3143
3144 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
3145 return 0;
3146
3147 nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
3148 nm_i->nat_bits_blocks;
3149 for (i = 0; i < nm_i->nat_bits_blocks; i++) {
3150 struct page *page;
3151
3152 page = f2fs_get_meta_page(sbi, nat_bits_addr++);
3153 if (IS_ERR(page))
3154 return PTR_ERR(page);
3155
3156 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
3157 page_address(page), F2FS_BLKSIZE);
3158 f2fs_put_page(page, 1);
3159 }
3160
3161 cp_ver |= (cur_cp_crc(ckpt) << 32);
3162 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
3163 clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
3164 f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
3165 cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
3166 return 0;
3167 }
3168
3169 f2fs_notice(sbi, "Found nat_bits in checkpoint");
3170 return 0;
3171 }
3172
3173 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
3174 {
3175 struct f2fs_nm_info *nm_i = NM_I(sbi);
3176 unsigned int i = 0;
3177 nid_t nid, last_nid;
3178
3179 if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
3180 return;
3181
3182 for (i = 0; i < nm_i->nat_blocks; i++) {
3183 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
3184 if (i >= nm_i->nat_blocks)
3185 break;
3186
3187 __set_bit_le(i, nm_i->nat_block_bitmap);
3188
3189 nid = i * NAT_ENTRY_PER_BLOCK;
3190 last_nid = nid + NAT_ENTRY_PER_BLOCK;
3191
3192 spin_lock(&NM_I(sbi)->nid_list_lock);
3193 for (; nid < last_nid; nid++)
3194 update_free_nid_bitmap(sbi, nid, true, true);
3195 spin_unlock(&NM_I(sbi)->nid_list_lock);
3196 }
3197
3198 for (i = 0; i < nm_i->nat_blocks; i++) {
3199 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
3200 if (i >= nm_i->nat_blocks)
3201 break;
3202
3203 __set_bit_le(i, nm_i->nat_block_bitmap);
3204 }
3205 }
3206
3207 static int init_node_manager(struct f2fs_sb_info *sbi)
3208 {
3209 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
3210 struct f2fs_nm_info *nm_i = NM_I(sbi);
3211 unsigned char *version_bitmap;
3212 unsigned int nat_segs;
3213 int err;
3214
3215 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
3216
3217 /* segment_count_nat includes pair segment so divide it by 2. */
3218 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
3219 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
3220 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
3221
3222 /* not used nids: 0, node, meta, (and root counted as valid node) */
3223 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
3224 F2FS_RESERVED_NODE_NUM;
3225 nm_i->nid_cnt[FREE_NID] = 0;
3226 nm_i->nid_cnt[PREALLOC_NID] = 0;
3227 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
3228 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
3229 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
3230 nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;
3231
3232 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
3233 INIT_LIST_HEAD(&nm_i->free_nid_list);
3234 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
3235 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
3236 INIT_LIST_HEAD(&nm_i->nat_entries);
3237 spin_lock_init(&nm_i->nat_list_lock);
3238
3239 mutex_init(&nm_i->build_lock);
3240 spin_lock_init(&nm_i->nid_list_lock);
3241 init_f2fs_rwsem(&nm_i->nat_tree_lock);
3242
3243 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
3244 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
3245 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
3246 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
3247 GFP_KERNEL);
3248 if (!nm_i->nat_bitmap)
3249 return -ENOMEM;
3250
3251 err = __get_nat_bitmaps(sbi);
3252 if (err)
3253 return err;
3254
3255 #ifdef CONFIG_F2FS_CHECK_FS
3256 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
3257 GFP_KERNEL);
3258 if (!nm_i->nat_bitmap_mir)
3259 return -ENOMEM;
3260 #endif
3261
3262 return 0;
3263 }
3264
3265 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
3266 {
3267 struct f2fs_nm_info *nm_i = NM_I(sbi);
3268 int i;
3269
3270 nm_i->free_nid_bitmap =
3271 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3272 nm_i->nat_blocks),
3273 GFP_KERNEL);
3274 if (!nm_i->free_nid_bitmap)
3275 return -ENOMEM;
3276
3277 for (i = 0; i < nm_i->nat_blocks; i++) {
3278 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3279 f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3280 if (!nm_i->free_nid_bitmap[i])
3281 return -ENOMEM;
3282 }
3283
3284 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3285 GFP_KERNEL);
3286 if (!nm_i->nat_block_bitmap)
3287 return -ENOMEM;
3288
3289 nm_i->free_nid_count =
3290 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3291 nm_i->nat_blocks),
3292 GFP_KERNEL);
3293 if (!nm_i->free_nid_count)
3294 return -ENOMEM;
3295 return 0;
3296 }
3297
3298 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3299 {
3300 int err;
3301
3302 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3303 GFP_KERNEL);
3304 if (!sbi->nm_info)
3305 return -ENOMEM;
3306
3307 err = init_node_manager(sbi);
3308 if (err)
3309 return err;
3310
3311 err = init_free_nid_cache(sbi);
3312 if (err)
3313 return err;
3314
3315 /* load free nid status from nat_bits table */
3316 load_free_nid_bitmap(sbi);
3317
3318 return f2fs_build_free_nids(sbi, true, true);
3319 }
3320
3321 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3322 {
3323 struct f2fs_nm_info *nm_i = NM_I(sbi);
3324 struct free_nid *i, *next_i;
3325 struct nat_entry *natvec[NATVEC_SIZE];
3326 struct nat_entry_set *setvec[SETVEC_SIZE];
3327 nid_t nid = 0;
3328 unsigned int found;
3329
3330 if (!nm_i)
3331 return;
3332
3333 /* destroy free nid list */
3334 spin_lock(&nm_i->nid_list_lock);
3335 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3336 __remove_free_nid(sbi, i, FREE_NID);
3337 spin_unlock(&nm_i->nid_list_lock);
3338 kmem_cache_free(free_nid_slab, i);
3339 spin_lock(&nm_i->nid_list_lock);
3340 }
3341 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3342 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3343 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3344 spin_unlock(&nm_i->nid_list_lock);
3345
3346 /* destroy nat cache */
3347 f2fs_down_write(&nm_i->nat_tree_lock);
3348 while ((found = __gang_lookup_nat_cache(nm_i,
3349 nid, NATVEC_SIZE, natvec))) {
3350 unsigned idx;
3351
3352 nid = nat_get_nid(natvec[found - 1]) + 1;
3353 for (idx = 0; idx < found; idx++) {
3354 spin_lock(&nm_i->nat_list_lock);
3355 list_del(&natvec[idx]->list);
3356 spin_unlock(&nm_i->nat_list_lock);
3357
3358 __del_from_nat_cache(nm_i, natvec[idx]);
3359 }
3360 }
3361 f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);
3362
3363 /* destroy nat set cache */
3364 nid = 0;
3365 while ((found = __gang_lookup_nat_set(nm_i,
3366 nid, SETVEC_SIZE, setvec))) {
3367 unsigned idx;
3368
3369 nid = setvec[found - 1]->set + 1;
3370 for (idx = 0; idx < found; idx++) {
3371 /* entry_cnt is not zero when a cp_error has occurred */
3372 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3373 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3374 kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3375 }
3376 }
3377 f2fs_up_write(&nm_i->nat_tree_lock);
3378
3379 kvfree(nm_i->nat_block_bitmap);
3380 if (nm_i->free_nid_bitmap) {
3381 int i;
3382
3383 for (i = 0; i < nm_i->nat_blocks; i++)
3384 kvfree(nm_i->free_nid_bitmap[i]);
3385 kvfree(nm_i->free_nid_bitmap);
3386 }
3387 kvfree(nm_i->free_nid_count);
3388
3389 kvfree(nm_i->nat_bitmap);
3390 kvfree(nm_i->nat_bits);
3391 #ifdef CONFIG_F2FS_CHECK_FS
3392 kvfree(nm_i->nat_bitmap_mir);
3393 #endif
3394 sbi->nm_info = NULL;
3395 kfree(nm_i);
3396 }
3397
3398 int __init f2fs_create_node_manager_caches(void)
3399 {
3400 nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3401 sizeof(struct nat_entry));
3402 if (!nat_entry_slab)
3403 goto fail;
3404
3405 free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3406 sizeof(struct free_nid));
3407 if (!free_nid_slab)
3408 goto destroy_nat_entry;
3409
3410 nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3411 sizeof(struct nat_entry_set));
3412 if (!nat_entry_set_slab)
3413 goto destroy_free_nid;
3414
3415 fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
3416 sizeof(struct fsync_node_entry));
3417 if (!fsync_node_entry_slab)
3418 goto destroy_nat_entry_set;
3419 return 0;
3420
3421 destroy_nat_entry_set:
3422 kmem_cache_destroy(nat_entry_set_slab);
3423 destroy_free_nid:
3424 kmem_cache_destroy(free_nid_slab);
3425 destroy_nat_entry:
3426 kmem_cache_destroy(nat_entry_slab);
3427 fail:
3428 return -ENOMEM;
3429 }
3430
3431 void f2fs_destroy_node_manager_caches(void)
3432 {
3433 kmem_cache_destroy(fsync_node_entry_slab);
3434 kmem_cache_destroy(nat_entry_set_slab);
3435 kmem_cache_destroy(free_nid_slab);
3436 kmem_cache_destroy(nat_entry_slab);
3437 }
3438