17c1a000dSChao Yu // SPDX-License-Identifier: GPL-2.0
20a8165d7SJaegeuk Kim /*
3e05df3b1SJaegeuk Kim * fs/f2fs/node.c
4e05df3b1SJaegeuk Kim *
5e05df3b1SJaegeuk Kim * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6e05df3b1SJaegeuk Kim * http://www.samsung.com/
7e05df3b1SJaegeuk Kim */
8e05df3b1SJaegeuk Kim #include <linux/fs.h>
9e05df3b1SJaegeuk Kim #include <linux/f2fs_fs.h>
10e05df3b1SJaegeuk Kim #include <linux/mpage.h>
114034247aSNeilBrown #include <linux/sched/mm.h>
12e05df3b1SJaegeuk Kim #include <linux/blkdev.h>
13e05df3b1SJaegeuk Kim #include <linux/pagevec.h>
14e05df3b1SJaegeuk Kim #include <linux/swap.h>
15e05df3b1SJaegeuk Kim
16e05df3b1SJaegeuk Kim #include "f2fs.h"
17e05df3b1SJaegeuk Kim #include "node.h"
18e05df3b1SJaegeuk Kim #include "segment.h"
1987905682SYunlei He #include "xattr.h"
2052118743SDaeho Jeong #include "iostat.h"
2151dd6249SNamjae Jeon #include <trace/events/f2fs.h>
22e05df3b1SJaegeuk Kim
23d1e1ff97SJulian Sun #define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
24f978f5a0SGu Zheng
25e05df3b1SJaegeuk Kim static struct kmem_cache *nat_entry_slab;
26e05df3b1SJaegeuk Kim static struct kmem_cache *free_nid_slab;
27aec71382SChao Yu static struct kmem_cache *nat_entry_set_slab;
2850fa53ecSChao Yu static struct kmem_cache *fsync_node_entry_slab;
29e05df3b1SJaegeuk Kim
30a4f843bdSJaegeuk Kim /*
31a4f843bdSJaegeuk Kim * Check whether the given nid is within node id range.
32a4f843bdSJaegeuk Kim */
f2fs_check_nid_range(struct f2fs_sb_info * sbi,nid_t nid)334d57b86dSChao Yu int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34a4f843bdSJaegeuk Kim {
35a4f843bdSJaegeuk Kim if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
36a4f843bdSJaegeuk Kim set_sbi_flag(sbi, SBI_NEED_FSCK);
37dcbb4c10SJoe Perches f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
38a4f843bdSJaegeuk Kim __func__, nid);
3995fa90c9SChao Yu f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
4010f966bbSChao Yu return -EFSCORRUPTED;
41a4f843bdSJaegeuk Kim }
42a4f843bdSJaegeuk Kim return 0;
43a4f843bdSJaegeuk Kim }
44a4f843bdSJaegeuk Kim
f2fs_available_free_memory(struct f2fs_sb_info * sbi,int type)454d57b86dSChao Yu bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
46cdfc41c1SJaegeuk Kim {
476fb03f3aSJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
48d6d2b491SSahitya Tummala struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
49cdfc41c1SJaegeuk Kim struct sysinfo val;
50e5e7ea3cSJaegeuk Kim unsigned long avail_ram;
51cdfc41c1SJaegeuk Kim unsigned long mem_size = 0;
526fb03f3aSJaegeuk Kim bool res = false;
53cdfc41c1SJaegeuk Kim
54d6d2b491SSahitya Tummala if (!nm_i)
55d6d2b491SSahitya Tummala return true;
56d6d2b491SSahitya Tummala
57cdfc41c1SJaegeuk Kim si_meminfo(&val);
58e5e7ea3cSJaegeuk Kim
59e5e7ea3cSJaegeuk Kim /* only uses low memory */
60e5e7ea3cSJaegeuk Kim avail_ram = val.totalram - val.totalhigh;
61e5e7ea3cSJaegeuk Kim
62429511cdSChao Yu /*
6371644dffSJaegeuk Kim * give 25%, 25%, 50%, 50%, 25%, 25% memory for each components respectively
64429511cdSChao Yu */
656fb03f3aSJaegeuk Kim if (type == FREE_NIDS) {
669a4ffdf5SChao Yu mem_size = (nm_i->nid_cnt[FREE_NID] *
67b8559dc2SChao Yu sizeof(struct free_nid)) >> PAGE_SHIFT;
68e5e7ea3cSJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
696fb03f3aSJaegeuk Kim } else if (type == NAT_ENTRIES) {
70a95ba66aSJaegeuk Kim mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
71a95ba66aSJaegeuk Kim sizeof(struct nat_entry)) >> PAGE_SHIFT;
72e5e7ea3cSJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
73e589c2c4SJaegeuk Kim if (excess_cached_nats(sbi))
74e589c2c4SJaegeuk Kim res = false;
75a1257023SJaegeuk Kim } else if (type == DIRTY_DENTS) {
76a1257023SJaegeuk Kim if (sbi->sb->s_bdi->wb.dirty_exceeded)
77a1257023SJaegeuk Kim return false;
78a1257023SJaegeuk Kim mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
79a1257023SJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
80e5e7ea3cSJaegeuk Kim } else if (type == INO_ENTRIES) {
81e5e7ea3cSJaegeuk Kim int i;
82e5e7ea3cSJaegeuk Kim
8339d787beSChao Yu for (i = 0; i < MAX_INO_ENTRY; i++)
848f73cbb7SKinglong Mee mem_size += sbi->im[i].ino_num *
858f73cbb7SKinglong Mee sizeof(struct ino_entry);
868f73cbb7SKinglong Mee mem_size >>= PAGE_SHIFT;
87e5e7ea3cSJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
8871644dffSJaegeuk Kim } else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
8971644dffSJaegeuk Kim enum extent_type etype = type == READ_EXTENT_CACHE ?
9071644dffSJaegeuk Kim EX_READ : EX_BLOCK_AGE;
9171644dffSJaegeuk Kim struct extent_tree_info *eti = &sbi->extent_tree[etype];
92e7547dacSJaegeuk Kim
93e7547dacSJaegeuk Kim mem_size = (atomic_read(&eti->total_ext_tree) *
947441ccefSJaegeuk Kim sizeof(struct extent_tree) +
95e7547dacSJaegeuk Kim atomic_read(&eti->total_ext_node) *
9609cbfeafSKirill A. Shutemov sizeof(struct extent_node)) >> PAGE_SHIFT;
9771644dffSJaegeuk Kim res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
98d6d2b491SSahitya Tummala } else if (type == DISCARD_CACHE) {
99d6d2b491SSahitya Tummala mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
100d6d2b491SSahitya Tummala sizeof(struct discard_cmd)) >> PAGE_SHIFT;
101d6d2b491SSahitya Tummala res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
1026ce19affSChao Yu } else if (type == COMPRESS_PAGE) {
1036ce19affSChao Yu #ifdef CONFIG_F2FS_FS_COMPRESSION
1046ce19affSChao Yu unsigned long free_ram = val.freeram;
1056ce19affSChao Yu
1066ce19affSChao Yu /*
1076ce19affSChao Yu * free memory is lower than watermark or cached page count
1086ce19affSChao Yu * exceed threshold, deny caching compress page.
1096ce19affSChao Yu */
1106ce19affSChao Yu res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
1116ce19affSChao Yu (COMPRESS_MAPPING(sbi)->nrpages <
1126ce19affSChao Yu free_ram * sbi->compress_percent / 100);
1136ce19affSChao Yu #else
1146ce19affSChao Yu res = false;
1156ce19affSChao Yu #endif
1161e84371fSJaegeuk Kim } else {
1171663cae4SJaegeuk Kim if (!sbi->sb->s_bdi->wb.dirty_exceeded)
1181663cae4SJaegeuk Kim return true;
1196fb03f3aSJaegeuk Kim }
1206fb03f3aSJaegeuk Kim return res;
121cdfc41c1SJaegeuk Kim }
122cdfc41c1SJaegeuk Kim
clear_node_page_dirty(struct page * page)123e05df3b1SJaegeuk Kim static void clear_node_page_dirty(struct page *page)
124e05df3b1SJaegeuk Kim {
125e05df3b1SJaegeuk Kim if (PageDirty(page)) {
126fd3a11afSChao Yu f2fs_clear_page_cache_dirty_tag(page_folio(page));
127e05df3b1SJaegeuk Kim clear_page_dirty_for_io(page);
128aec2f729SChao Yu dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
129e05df3b1SJaegeuk Kim }
130e05df3b1SJaegeuk Kim ClearPageUptodate(page);
131e05df3b1SJaegeuk Kim }
132e05df3b1SJaegeuk Kim
/* Read (with retry) the meta page holding the current NAT block of @nid. */
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}
137e05df3b1SJaegeuk Kim
/*
 * Copy the current NAT block of @nid into its alternate ("next") slot,
 * flip the NAT version bitmap, and return the locked, dirtied destination
 * page.  Returns ERR_PTR on failure to read the source block.
 */
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t next_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
	struct page *cur_page;
	struct page *next_page;

	/* get current nat block page with lock */
	cur_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(cur_page))
		return cur_page;
	next_page = f2fs_grab_meta_page(sbi, next_off);
	f2fs_bug_on(sbi, PageDirty(cur_page));

	memcpy(page_address(next_page), page_address(cur_page), PAGE_SIZE);
	set_page_dirty(next_page);
	f2fs_put_page(cur_page, 1);

	/* subsequent lookups of this nid use the alternate block */
	set_to_next_nat(nm_i, nid);

	return next_page;
}
166e05df3b1SJaegeuk Kim
/*
 * Allocate a zeroed nat_entry for @nid.  May return NULL only when
 * @no_fail is false.
 */
static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *ne;

	ne = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (!ne)
		return NULL;

	nat_set_nid(ne, nid);
	nat_reset_flag(ne);
	return ne;
}
18012f9ef37SYunlei He
__free_nat_entry(struct nat_entry * e)18112f9ef37SYunlei He static void __free_nat_entry(struct nat_entry *e)
18212f9ef37SYunlei He {
18312f9ef37SYunlei He kmem_cache_free(nat_entry_slab, e);
18412f9ef37SYunlei He }
18512f9ef37SYunlei He
18612f9ef37SYunlei He /* must be locked by nat_tree_lock */
__init_nat_entry(struct f2fs_nm_info * nm_i,struct nat_entry * ne,struct f2fs_nat_entry * raw_ne,bool no_fail)18712f9ef37SYunlei He static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
18812f9ef37SYunlei He struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
18912f9ef37SYunlei He {
19012f9ef37SYunlei He if (no_fail)
19112f9ef37SYunlei He f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
19212f9ef37SYunlei He else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
19312f9ef37SYunlei He return NULL;
19412f9ef37SYunlei He
19512f9ef37SYunlei He if (raw_ne)
19612f9ef37SYunlei He node_info_from_raw_nat(&ne->ni, raw_ne);
19722969158SChao Yu
19822969158SChao Yu spin_lock(&nm_i->nat_list_lock);
19912f9ef37SYunlei He list_add_tail(&ne->list, &nm_i->nat_entries);
20022969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
20122969158SChao Yu
202a95ba66aSJaegeuk Kim nm_i->nat_cnt[TOTAL_NAT]++;
203a95ba66aSJaegeuk Kim nm_i->nat_cnt[RECLAIMABLE_NAT]++;
20412f9ef37SYunlei He return ne;
20512f9ef37SYunlei He }
20612f9ef37SYunlei He
/* Look up nid @n in the NAT cache, refreshing its clean-LRU position. */
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *e = radix_tree_lookup(&nm_i->nat_root, n);

	/* for recent accessed nat entry, move it to tail of lru list */
	if (e && !get_nat_flag(e, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&e->list))
			list_move_tail(&e->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return e;
}
223e05df3b1SJaegeuk Kim
/* Gang-lookup up to @nr cached nat entries starting at nid @start. */
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}
229e05df3b1SJaegeuk Kim
__del_from_nat_cache(struct f2fs_nm_info * nm_i,struct nat_entry * e)230e05df3b1SJaegeuk Kim static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
231e05df3b1SJaegeuk Kim {
232e05df3b1SJaegeuk Kim radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
233a95ba66aSJaegeuk Kim nm_i->nat_cnt[TOTAL_NAT]--;
234a95ba66aSJaegeuk Kim nm_i->nat_cnt[RECLAIMABLE_NAT]--;
23512f9ef37SYunlei He __free_nat_entry(e);
236e05df3b1SJaegeuk Kim }
237e05df3b1SJaegeuk Kim
__grab_nat_entry_set(struct f2fs_nm_info * nm_i,struct nat_entry * ne)238780de47cSChao Yu static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
239309cc2b6SJaegeuk Kim struct nat_entry *ne)
240309cc2b6SJaegeuk Kim {
241309cc2b6SJaegeuk Kim nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
242309cc2b6SJaegeuk Kim struct nat_entry_set *head;
243309cc2b6SJaegeuk Kim
244309cc2b6SJaegeuk Kim head = radix_tree_lookup(&nm_i->nat_set_root, set);
245309cc2b6SJaegeuk Kim if (!head) {
24632410577SChao Yu head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
24732410577SChao Yu GFP_NOFS, true, NULL);
248309cc2b6SJaegeuk Kim
249309cc2b6SJaegeuk Kim INIT_LIST_HEAD(&head->entry_list);
250309cc2b6SJaegeuk Kim INIT_LIST_HEAD(&head->set_list);
251309cc2b6SJaegeuk Kim head->set = set;
252309cc2b6SJaegeuk Kim head->entry_cnt = 0;
2539be32d72SJaegeuk Kim f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
254309cc2b6SJaegeuk Kim }
255780de47cSChao Yu return head;
256780de47cSChao Yu }
257780de47cSChao Yu
__set_nat_cache_dirty(struct f2fs_nm_info * nm_i,struct nat_entry * ne)258780de47cSChao Yu static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
259780de47cSChao Yu struct nat_entry *ne)
260780de47cSChao Yu {
261780de47cSChao Yu struct nat_entry_set *head;
262780de47cSChao Yu bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
263780de47cSChao Yu
264780de47cSChao Yu if (!new_ne)
265780de47cSChao Yu head = __grab_nat_entry_set(nm_i, ne);
266780de47cSChao Yu
267780de47cSChao Yu /*
268780de47cSChao Yu * update entry_cnt in below condition:
269780de47cSChao Yu * 1. update NEW_ADDR to valid block address;
270780de47cSChao Yu * 2. update old block address to new one;
271780de47cSChao Yu */
272780de47cSChao Yu if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
273780de47cSChao Yu !get_nat_flag(ne, IS_DIRTY)))
274780de47cSChao Yu head->entry_cnt++;
275780de47cSChao Yu
276780de47cSChao Yu set_nat_flag(ne, IS_PREALLOC, new_ne);
277febeca6dSChao Yu
278febeca6dSChao Yu if (get_nat_flag(ne, IS_DIRTY))
279febeca6dSChao Yu goto refresh_list;
280febeca6dSChao Yu
281a95ba66aSJaegeuk Kim nm_i->nat_cnt[DIRTY_NAT]++;
282a95ba66aSJaegeuk Kim nm_i->nat_cnt[RECLAIMABLE_NAT]--;
283309cc2b6SJaegeuk Kim set_nat_flag(ne, IS_DIRTY, true);
284febeca6dSChao Yu refresh_list:
28522969158SChao Yu spin_lock(&nm_i->nat_list_lock);
286780de47cSChao Yu if (new_ne)
287febeca6dSChao Yu list_del_init(&ne->list);
288febeca6dSChao Yu else
289febeca6dSChao Yu list_move_tail(&ne->list, &head->entry_list);
29022969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
291309cc2b6SJaegeuk Kim }
292309cc2b6SJaegeuk Kim
__clear_nat_cache_dirty(struct f2fs_nm_info * nm_i,struct nat_entry_set * set,struct nat_entry * ne)293309cc2b6SJaegeuk Kim static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
2940b28b71eSKinglong Mee struct nat_entry_set *set, struct nat_entry *ne)
295309cc2b6SJaegeuk Kim {
29622969158SChao Yu spin_lock(&nm_i->nat_list_lock);
297309cc2b6SJaegeuk Kim list_move_tail(&ne->list, &nm_i->nat_entries);
29822969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
29922969158SChao Yu
300309cc2b6SJaegeuk Kim set_nat_flag(ne, IS_DIRTY, false);
3010b28b71eSKinglong Mee set->entry_cnt--;
302a95ba66aSJaegeuk Kim nm_i->nat_cnt[DIRTY_NAT]--;
303a95ba66aSJaegeuk Kim nm_i->nat_cnt[RECLAIMABLE_NAT]++;
304309cc2b6SJaegeuk Kim }
305309cc2b6SJaegeuk Kim
/* Gang-lookup up to @nr dirty nat entry sets starting at set @start. */
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
					start, nr);
}
312309cc2b6SJaegeuk Kim
f2fs_in_warm_node_list(struct f2fs_sb_info * sbi,const struct folio * folio)3131a58a41cSMatthew Wilcox (Oracle) bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, const struct folio *folio)
31450fa53ecSChao Yu {
3151a58a41cSMatthew Wilcox (Oracle) return NODE_MAPPING(sbi) == folio->mapping &&
3161a58a41cSMatthew Wilcox (Oracle) IS_DNODE(&folio->page) && is_cold_node(&folio->page);
31750fa53ecSChao Yu }
31850fa53ecSChao Yu
f2fs_init_fsync_node_info(struct f2fs_sb_info * sbi)31950fa53ecSChao Yu void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
32050fa53ecSChao Yu {
32150fa53ecSChao Yu spin_lock_init(&sbi->fsync_node_lock);
32250fa53ecSChao Yu INIT_LIST_HEAD(&sbi->fsync_node_list);
32350fa53ecSChao Yu sbi->fsync_seg_id = 0;
32450fa53ecSChao Yu sbi->fsync_node_num = 0;
32550fa53ecSChao Yu }
32650fa53ecSChao Yu
f2fs_add_fsync_node_entry(struct f2fs_sb_info * sbi,struct page * page)32750fa53ecSChao Yu static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
32850fa53ecSChao Yu struct page *page)
32950fa53ecSChao Yu {
33050fa53ecSChao Yu struct fsync_node_entry *fn;
33150fa53ecSChao Yu unsigned long flags;
33250fa53ecSChao Yu unsigned int seq_id;
33350fa53ecSChao Yu
33432410577SChao Yu fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
33532410577SChao Yu GFP_NOFS, true, NULL);
33650fa53ecSChao Yu
33750fa53ecSChao Yu get_page(page);
33850fa53ecSChao Yu fn->page = page;
33950fa53ecSChao Yu INIT_LIST_HEAD(&fn->list);
34050fa53ecSChao Yu
34150fa53ecSChao Yu spin_lock_irqsave(&sbi->fsync_node_lock, flags);
34250fa53ecSChao Yu list_add_tail(&fn->list, &sbi->fsync_node_list);
34350fa53ecSChao Yu fn->seq_id = sbi->fsync_seg_id++;
34450fa53ecSChao Yu seq_id = fn->seq_id;
34550fa53ecSChao Yu sbi->fsync_node_num++;
34650fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
34750fa53ecSChao Yu
34850fa53ecSChao Yu return seq_id;
34950fa53ecSChao Yu }
35050fa53ecSChao Yu
f2fs_del_fsync_node_entry(struct f2fs_sb_info * sbi,struct page * page)35150fa53ecSChao Yu void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
35250fa53ecSChao Yu {
35350fa53ecSChao Yu struct fsync_node_entry *fn;
35450fa53ecSChao Yu unsigned long flags;
35550fa53ecSChao Yu
35650fa53ecSChao Yu spin_lock_irqsave(&sbi->fsync_node_lock, flags);
35750fa53ecSChao Yu list_for_each_entry(fn, &sbi->fsync_node_list, list) {
35850fa53ecSChao Yu if (fn->page == page) {
35950fa53ecSChao Yu list_del(&fn->list);
36050fa53ecSChao Yu sbi->fsync_node_num--;
36150fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
36250fa53ecSChao Yu kmem_cache_free(fsync_node_entry_slab, fn);
36350fa53ecSChao Yu put_page(page);
36450fa53ecSChao Yu return;
36550fa53ecSChao Yu }
36650fa53ecSChao Yu }
36750fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
36850fa53ecSChao Yu f2fs_bug_on(sbi, 1);
36950fa53ecSChao Yu }
37050fa53ecSChao Yu
f2fs_reset_fsync_node_info(struct f2fs_sb_info * sbi)37150fa53ecSChao Yu void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
37250fa53ecSChao Yu {
37350fa53ecSChao Yu unsigned long flags;
37450fa53ecSChao Yu
37550fa53ecSChao Yu spin_lock_irqsave(&sbi->fsync_node_lock, flags);
37650fa53ecSChao Yu sbi->fsync_seg_id = 0;
37750fa53ecSChao Yu spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
37850fa53ecSChao Yu }
37950fa53ecSChao Yu
/*
 * A dentry mark is needed when the cached nat entry for @nid exists but
 * is neither checkpointed nor belongs to an fsynced inode.
 */
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		need = !get_nat_flag(e, IS_CHECKPOINTED) &&
			!get_nat_flag(e, HAS_FSYNCED_INODE);
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}
3962dcf51abSJaegeuk Kim
f2fs_is_checkpointed_node(struct f2fs_sb_info * sbi,nid_t nid)3974d57b86dSChao Yu bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
398e05df3b1SJaegeuk Kim {
399e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
400e05df3b1SJaegeuk Kim struct nat_entry *e;
40188bd02c9SJaegeuk Kim bool is_cp = true;
402e05df3b1SJaegeuk Kim
403e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
404e05df3b1SJaegeuk Kim e = __lookup_nat_cache(nm_i, nid);
4057ef35e3bSJaegeuk Kim if (e && !get_nat_flag(e, IS_CHECKPOINTED))
40688bd02c9SJaegeuk Kim is_cp = false;
407e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
408e05df3b1SJaegeuk Kim return is_cp;
409e05df3b1SJaegeuk Kim }
410e05df3b1SJaegeuk Kim
f2fs_need_inode_block_update(struct f2fs_sb_info * sbi,nid_t ino)4114d57b86dSChao Yu bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
412b6fe5873SJaegeuk Kim {
413b6fe5873SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
414b6fe5873SJaegeuk Kim struct nat_entry *e;
41588bd02c9SJaegeuk Kim bool need_update = true;
416b6fe5873SJaegeuk Kim
417e4544b63STim Murray f2fs_down_read(&nm_i->nat_tree_lock);
41888bd02c9SJaegeuk Kim e = __lookup_nat_cache(nm_i, ino);
41988bd02c9SJaegeuk Kim if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
42088bd02c9SJaegeuk Kim (get_nat_flag(e, IS_CHECKPOINTED) ||
42188bd02c9SJaegeuk Kim get_nat_flag(e, HAS_FSYNCED_INODE)))
42288bd02c9SJaegeuk Kim need_update = false;
423e4544b63STim Murray f2fs_up_read(&nm_i->nat_tree_lock);
42488bd02c9SJaegeuk Kim return need_update;
425b6fe5873SJaegeuk Kim }
426b6fe5873SJaegeuk Kim
42712f9ef37SYunlei He /* must be locked by nat_tree_lock */
cache_nat_entry(struct f2fs_sb_info * sbi,nid_t nid,struct f2fs_nat_entry * ne)4281515aef0SChao Yu static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
429e05df3b1SJaegeuk Kim struct f2fs_nat_entry *ne)
430e05df3b1SJaegeuk Kim {
4311515aef0SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
43212f9ef37SYunlei He struct nat_entry *new, *e;
4339be32d72SJaegeuk Kim
4340df035c7SJaegeuk Kim /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
435e4544b63STim Murray if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
4360df035c7SJaegeuk Kim return;
4370df035c7SJaegeuk Kim
43832410577SChao Yu new = __alloc_nat_entry(sbi, nid, false);
43912f9ef37SYunlei He if (!new)
44012f9ef37SYunlei He return;
44112f9ef37SYunlei He
442e4544b63STim Murray f2fs_down_write(&nm_i->nat_tree_lock);
443e05df3b1SJaegeuk Kim e = __lookup_nat_cache(nm_i, nid);
44412f9ef37SYunlei He if (!e)
44512f9ef37SYunlei He e = __init_nat_entry(nm_i, new, ne, false);
44612f9ef37SYunlei He else
4470c0b471eSEric Biggers f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
4480c0b471eSEric Biggers nat_get_blkaddr(e) !=
4490c0b471eSEric Biggers le32_to_cpu(ne->block_addr) ||
4501515aef0SChao Yu nat_get_version(e) != ne->version);
451e4544b63STim Murray f2fs_up_write(&nm_i->nat_tree_lock);
45212f9ef37SYunlei He if (e != new)
45312f9ef37SYunlei He __free_nat_entry(new);
454e05df3b1SJaegeuk Kim }
455e05df3b1SJaegeuk Kim
/*
 * Record the new block address of node @ni->nid in the NAT cache and mark
 * the entry dirty for the next checkpoint.  @fsync_done updates the fsync
 * flags on the owning inode's entry (if cached).
 */
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
		block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *prealloc = __alloc_nat_entry(sbi, ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, prealloc, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when nid is reallocated,
		 * previous nat entry can be remained in nat cache.
		 * So, reinitialize it with new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != prealloc)
		__free_nat_entry(prealloc);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}
514e05df3b1SJaegeuk Kim
f2fs_try_to_free_nats(struct f2fs_sb_info * sbi,int nr_shrink)5154d57b86dSChao Yu int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
516e05df3b1SJaegeuk Kim {
517e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
5181b38dc8eSJaegeuk Kim int nr = nr_shrink;
519e05df3b1SJaegeuk Kim
520e4544b63STim Murray if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
521b873b798SJaegeuk Kim return 0;
522e05df3b1SJaegeuk Kim
52322969158SChao Yu spin_lock(&nm_i->nat_list_lock);
52422969158SChao Yu while (nr_shrink) {
525e05df3b1SJaegeuk Kim struct nat_entry *ne;
52622969158SChao Yu
52722969158SChao Yu if (list_empty(&nm_i->nat_entries))
52822969158SChao Yu break;
52922969158SChao Yu
530e05df3b1SJaegeuk Kim ne = list_first_entry(&nm_i->nat_entries,
531e05df3b1SJaegeuk Kim struct nat_entry, list);
53222969158SChao Yu list_del(&ne->list);
53322969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
53422969158SChao Yu
535e05df3b1SJaegeuk Kim __del_from_nat_cache(nm_i, ne);
536e05df3b1SJaegeuk Kim nr_shrink--;
53722969158SChao Yu
53822969158SChao Yu spin_lock(&nm_i->nat_list_lock);
539e05df3b1SJaegeuk Kim }
54022969158SChao Yu spin_unlock(&nm_i->nat_list_lock);
54122969158SChao Yu
542e4544b63STim Murray f2fs_up_write(&nm_i->nat_tree_lock);
5431b38dc8eSJaegeuk Kim return nr - nr_shrink;
544e05df3b1SJaegeuk Kim }
545e05df3b1SJaegeuk Kim
/*
 * Fill @ni with the node info of @nid, consulting in order: the NAT
 * cache, the current-segment NAT journal, and finally the on-disk NAT
 * block.  Returns 0 on success, -EFAULT on an invalid cached block
 * address, or an error from reading the meta page.
 */
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
				struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *npage = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t nat_index;
	block_t blkaddr;
	int i;

	ni->flag = 0;
	ni->nid = nid;
retry:
	/* Check nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check current segment summary by trying to grab journal_rwsem first.
	 * This sem is on the critical path on the checkpoint requiring the above
	 * nat_tree_lock. Therefore, we should retry, if we failed to grab here
	 * while not bothering checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	nat_index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	npage = f2fs_get_meta_page(sbi, nat_index);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	nat_blk = (struct f2fs_nat_block *)page_address(npage);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(npage, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}
622e05df3b1SJaegeuk Kim
62379344efbSJaegeuk Kim /*
62479344efbSJaegeuk Kim * readahead MAX_RA_NODE number of node pages.
62579344efbSJaegeuk Kim */
f2fs_ra_node_pages(struct page * parent,int start,int n)6264d57b86dSChao Yu static void f2fs_ra_node_pages(struct page *parent, int start, int n)
62779344efbSJaegeuk Kim {
62879344efbSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
62979344efbSJaegeuk Kim struct blk_plug plug;
63079344efbSJaegeuk Kim int i, end;
63179344efbSJaegeuk Kim nid_t nid;
63279344efbSJaegeuk Kim
63379344efbSJaegeuk Kim blk_start_plug(&plug);
63479344efbSJaegeuk Kim
63579344efbSJaegeuk Kim /* Then, try readahead for siblings of the desired node */
63679344efbSJaegeuk Kim end = start + n;
637d7e9a903SDaniel Rosenberg end = min(end, (int)NIDS_PER_BLOCK);
63879344efbSJaegeuk Kim for (i = start; i < end; i++) {
63979344efbSJaegeuk Kim nid = get_nid(parent, i, false);
6404d57b86dSChao Yu f2fs_ra_node_page(sbi, nid);
64179344efbSJaegeuk Kim }
64279344efbSJaegeuk Kim
64379344efbSJaegeuk Kim blk_finish_plug(&plug);
64479344efbSJaegeuk Kim }
64579344efbSJaegeuk Kim
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int unit = ADDRS_PER_BLOCK(dn->inode);
	pgoff_t base = 0;
	int lvl;

	/* a zero max_level means no lookup failure level was recorded */
	if (!dn->max_level)
		return pgofs + 1;

	/* widen the skip unit once per level between cur_level and max_level */
	for (lvl = dn->max_level; lvl > dn->cur_level; lvl--)
		unit *= NIDS_PER_BLOCK;

	/* base = number of data blocks addressed below the failing level */
	switch (dn->max_level) {
	case 1:
		base = direct_index;
		break;
	case 2:
		base = direct_index + 2 * direct_blks;
		break;
	case 3:
		base = direct_index + 2 * direct_blks + 2 * indirect_blks;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	/* round pgofs up to the start of the next unit past the base */
	return ((pgofs - base) / unit + 1) * unit + base;
}
6783cf45747SChao Yu
6790a8165d7SJaegeuk Kim /*
680e05df3b1SJaegeuk Kim * The maximum depth is four.
681e05df3b1SJaegeuk Kim * Offset[0] will have raw inode offset.
682e05df3b1SJaegeuk Kim */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	/*
	 * Translate a file block number into the indexing path through the
	 * inode's node tree.
	 *
	 * offset[i]  - index to follow within the node at depth i
	 *              (offset[0] indexes the raw inode itself)
	 * noffset[i] - ordinal of the node page at depth i, counted over all
	 *              node pages belonging to this inode
	 *
	 * Returns the tree depth (0..3) needed to reach @block, or -E2BIG
	 * when @block lies beyond the double-indirect range.
	 */
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	/* block addressed directly from the inode: depth 0 */
	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	/* first direct node block: depth 1 */
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	/* second direct node block: depth 1 */
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	/* first indirect node: depth 2 */
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	/* second indirect node: depth 2 */
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	/* double-indirect node: depth 3 */
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		/* beyond the maximum addressable file block */
		return -E2BIG;
	}
got:
	return level;
}
756e05df3b1SJaegeuk Kim
757e05df3b1SJaegeuk Kim /*
758e05df3b1SJaegeuk Kim * Caller should call f2fs_put_dnode(dn).
7594f4124d0SChao Yu * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
7607a88ddb5SChao Yu * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
761e05df3b1SJaegeuk Kim */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];		/* node page at each tree depth */
	struct page *parent = NULL;
	int offset[4];			/* per-level index, see get_node_path() */
	unsigned int noffset[4];	/* per-level node ordinal */
	nid_t nids[4];			/* node id at each depth */
	int level, i = 0;
	int err = 0;

	/* resolve the indexing path for @index; level < 0 means out of range */
	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	/* caller may pass a pre-loaded inode page; otherwise fetch it */
	if (!npage[0]) {
		npage[0] = f2fs_get_inode_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				/* return the reserved nid on failure */
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			/* link the new node into its parent */
			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			/* last hop of a RA lookup: read with sibling readahead */
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		/*
		 * Drop the parent before descending: the inode page is only
		 * unlocked (caller still holds its reference), deeper nodes
		 * are fully released.
		 */
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				/* inode page is unlocked here, put ref only */
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	/* success: hand the located dnode back to the caller */
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
		unsigned int ofs_in_node = dn->ofs_in_node;
		pgoff_t fofs = index;
		unsigned int c_len;
		block_t blkaddr;

		/* should align fofs and ofs_in_node to cluster_size */
		if (fofs % cluster_size) {
			fofs = round_down(fofs, cluster_size);
			ofs_in_node = round_down(ofs_in_node, cluster_size);
		}

		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
		/* COMPRESS_ADDR marks a compressed cluster; use the next slot */
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	/* on -ENOENT record how deep we got so callers can skip holes */
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
897e05df3b1SJaegeuk Kim
static int truncate_node(struct dnode_of_data *dn)
{
	/*
	 * Free the node addressed by dn->nid: invalidate its block, drop the
	 * NAT mapping, update counters, and release dn->node_page.
	 * Returns 0 on success or a negative errno.
	 */
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	/* reject a corrupted NAT entry before touching any state */
	if (ni.blk_addr != NEW_ADDR &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
		f2fs_err_ratelimited(sbi,
			"nat entry is corrupted, run fsck to fix it, ino:%u, "
			"nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		return -EFSCORRUPTED;
	}

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	/* truncating the inode node itself also removes the inode */
	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	/* remember the index before the page reference is dropped */
	index = page_folio(dn->node_page)->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}
944e05df3b1SJaegeuk Kim
static int truncate_dnode(struct dnode_of_data *dn)
{
	/*
	 * Truncate one direct node and all data blocks it addresses.
	 * Returns 1 when the node slot is (now) empty, or a negative errno.
	 */
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *page;
	int err;

	/* nid 0 means a hole: nothing to free */
	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(sbi, dn->nid);
	/* a missing node page counts as already truncated */
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* the node must be a non-inode node owned by this inode */
	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
				dn->inode->i_ino, dn->nid, ino_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
		f2fs_put_page(page, 1);
		return -EFSCORRUPTED;
	}

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
	err = truncate_node(dn);
	if (err) {
		/* truncate_node() did not consume the page on failure */
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}
982e05df3b1SJaegeuk Kim
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	/*
	 * Recursively truncate an (double-)indirect node starting at child
	 * index @ofs.  @nofs is the node ordinal of this node, @depth the
	 * remaining tree depth (2 = indirect, 3 = double-indirect).
	 * Returns the number of node-page slots freed (NIDS_PER_BLOCK + 1
	 * when the whole subtree is gone) or a negative errno.
	 */
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	/* nid 0 is a hole: report the whole subtree as freed */
	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	/* prefetch the children before walking them */
	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		/* children are direct nodes */
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			/* clear the child slot; dirtying marks dn changed */
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		/* children are indirect nodes: recurse one level down */
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				/* subtree fully freed: unlink the child */
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		/* partial truncation: keep this node, just release the page */
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
1059e05df3b1SJaegeuk Kim
static int truncate_partial_nodes(struct dnode_of_data *dn,
					struct f2fs_inode *ri, int *offset, int depth)
{
	/*
	 * Truncate the tail of the node subtree selected by @offset when the
	 * truncation point falls inside it (a "partial" level).  Walks down
	 * the indirect chain, frees direct nodes past offset[idx + 1], and
	 * drops the indirect node itself when it becomes empty.  On return,
	 * @offset is advanced so the caller continues at the next slot.
	 */
	struct page *pages[2];		/* indirect nodes on the path */
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;		/* index of the deepest indirect node */

	nid[0] = get_nid(dn->inode_page, offset[0], true);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			/* only pages[0..i-1] were taken; free just those */
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		/* the whole indirect node was covered: remove it too */
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	/* advance the caller's cursor past this subtree */
	offset[idx]++;
	offset[idx + 1] = 0;
	/* truncate_node()/put released pages[idx]; don't free it again */
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
1121e05df3b1SJaegeuk Kim
11220a8165d7SJaegeuk Kim /*
1123e05df3b1SJaegeuk Kim * All the block addresses of data and nodes should be nullified.
1124e05df3b1SJaegeuk Kim */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	/*
	 * Truncate all node blocks (and the data they address) of @inode at
	 * and beyond file block @from.  Returns 0 on success or a negative
	 * errno.
	 */
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct folio *folio;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	/* compute the indexing path of the truncation point */
	level = get_node_path(inode, from, offset, noffset);
	if (level <= 0) {
		/* level 0 would mean truncating inside the inode itself */
		if (!level) {
			level = -EFSCORRUPTED;
			f2fs_err(sbi, "%s: inode ino=%lx has corrupted node block, from:%lu addrs:%u",
				__func__, inode->i_ino,
				from, ADDRS_PER_INODE(inode));
			set_sbi_flag(sbi, SBI_NEED_FSCK);
		}
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	folio = f2fs_get_inode_folio(sbi, inode->i_ino);
	if (IS_ERR(folio)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(folio));
		return PTR_ERR(folio);
	}

	set_new_dnode(&dn, inode, &folio->page, NULL, 0);
	/* keep the reference but drop the lock while walking the tree */
	folio_unlock(folio);

	ri = F2FS_INODE(&folio->page);
	/* first handle the level that @from cuts through the middle of */
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	/* then drop every remaining slot of the inode, in layout order */
	while (cont) {
		dn.nid = get_nid(&folio->page, offset[0], true);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			/* double-indirect is the last slot: stop after it */
			cont = 0;
			break;

		default:
			BUG();
		}
		/* a missing node here indicates corruption; log and go on */
		if (err == -ENOENT) {
			set_sbi_flag(F2FS_F_SB(folio), SBI_NEED_FSCK);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			f2fs_err_ratelimited(sbi,
				"truncate node fail, ino:%lu, nid:%u, "
				"offset[0]:%d, offset[1]:%d, nofs:%d",
				inode->i_ino, dn.nid, offset[0],
				offset[1], nofs);
			err = 0;
		}
		if (err < 0)
			goto fail;
		/* clear the freed slot in the inode, under the folio lock */
		if (offset[1] == 0 && get_nid(&folio->page, offset[0], true)) {
			folio_lock(folio);
			BUG_ON(folio->mapping != NODE_MAPPING(sbi));
			set_nid(&folio->page, offset[0], 0, true);
			folio_unlock(folio);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_folio_put(folio, false);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	/* positive err values are freed-node counts, not errors */
	return err > 0 ? 0 : err;
}
1235e05df3b1SJaegeuk Kim
12369c77f754SJaegeuk Kim /* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	/*
	 * Free the inode's dedicated xattr node, if any, and clear
	 * i_xattr_nid.  Returns 0 on success or a negative errno.
	 */
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	/* no xattr node allocated: nothing to do */
	if (!nid)
		return 0;

	npage = f2fs_get_xnode_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		/* truncate_node() did not consume the page on failure */
		f2fs_put_page(npage, 1);
		return err;
	}

	/* detach the freed nid from the inode */
	f2fs_i_xnid_write(inode, 0);

	return 0;
}
12634f16fb0fSJaegeuk Kim
126439936837SJaegeuk Kim /*
12654f4124d0SChao Yu * Caller should grab and release a rwsem by calling f2fs_lock_op() and
12664f4124d0SChao Yu * f2fs_unlock_op().
126739936837SJaegeuk Kim */
int f2fs_remove_inode_page(struct inode *inode)
{
	/*
	 * Remove the inode's node page along with its xattr node and any
	 * remaining (inline) data block.  Returns 0 on success or a
	 * negative errno.
	 */
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (!IS_DEVICE_ALIASING(inode) &&
	    (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	     S_ISLNK(inode->i_mode)))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	/* by now i_blocks should be 0 or 8; anything else means corruption */
	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}
1311e05df3b1SJaegeuk Kim
f2fs_new_inode_page(struct inode * inode)13124d57b86dSChao Yu struct page *f2fs_new_inode_page(struct inode *inode)
1313e05df3b1SJaegeuk Kim {
1314e05df3b1SJaegeuk Kim struct dnode_of_data dn;
1315e05df3b1SJaegeuk Kim
1316e05df3b1SJaegeuk Kim /* allocate inode page for new inode */
1317e05df3b1SJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
131844a83ff6SJaegeuk Kim
131944a83ff6SJaegeuk Kim /* caller should f2fs_put_page(page, 1); */
13204d57b86dSChao Yu return f2fs_new_node_page(&dn, 0);
1321e05df3b1SJaegeuk Kim }
1322e05df3b1SJaegeuk Kim
/*
 * Allocate a new node page for dn->nid.  @ofs is the node's offset in
 * the inode's node tree (0 means the inode block; xattr offsets get the
 * inode's i_xattr_nid updated).  On success the page is returned locked,
 * uptodate and dirty; the caller must f2fs_put_page(page, 1).
 */
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	/* charge one node block (and an inode if ofs == 0) against quota */
	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	/* debug build only: a fresh nid's NAT entry must still be NULL_ADDR */
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		dec_valid_node_count(sbi, dn->inode, !ofs);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn_ratelimited(sbi,
			"f2fs_new_node_page: inconsistent nat entry, "
			"ino:%u, nid:%u, blkaddr:%u, ver:%u, flag:%u",
			new_ni.ino, new_ni.nid, new_ni.blk_addr,
			new_ni.version, new_ni.flag);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	/* record the node as newly allocated in the NAT cache */
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	/* an xattr-offset node becomes the inode's xattr nid */
	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;
fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
1385e05df3b1SJaegeuk Kim
/*
 * Caller should do after getting the following values.
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 *
 * I.e. on return 0 a read bio was submitted and the bio completion owns
 * the page lock; on LOCKED_PAGE or error the page is still locked by
 * the caller.
 */
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
	struct folio *folio = page_folio(page);
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (folio_test_uptodate(folio)) {
		/* cached copy: only re-verify the inode checksum */
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			folio_clear_uptodate(folio);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, folio->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		folio_clear_uptodate(folio);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}
1433e05df3b1SJaegeuk Kim
14340a8165d7SJaegeuk Kim /*
1435e05df3b1SJaegeuk Kim * Readahead a node page
1436e05df3b1SJaegeuk Kim */
f2fs_ra_node_page(struct f2fs_sb_info * sbi,nid_t nid)14374d57b86dSChao Yu void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1438e05df3b1SJaegeuk Kim {
1439e05df3b1SJaegeuk Kim struct page *apage;
144056ae674cSJaegeuk Kim int err;
1441e05df3b1SJaegeuk Kim
1442e8458725SChao Yu if (!nid)
1443e8458725SChao Yu return;
14444d57b86dSChao Yu if (f2fs_check_nid_range(sbi, nid))
1445a4f843bdSJaegeuk Kim return;
1446e8458725SChao Yu
14475ec2d99dSMatthew Wilcox apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
1448999270deSFan Li if (apage)
1449393ff91fSJaegeuk Kim return;
1450e05df3b1SJaegeuk Kim
1451300e129cSJaegeuk Kim apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1452e05df3b1SJaegeuk Kim if (!apage)
1453e05df3b1SJaegeuk Kim return;
1454e05df3b1SJaegeuk Kim
145570246286SChristoph Hellwig err = read_node_page(apage, REQ_RAHEAD);
145686531d6bSJaegeuk Kim f2fs_put_page(apage, err ? 1 : 0);
1457e05df3b1SJaegeuk Kim }
1458e05df3b1SJaegeuk Kim
/*
 * Validate that the node block's footer matches what the caller asked
 * for: the nid in the footer equals @nid, and the block really is an
 * inode / xattr node when @ntype demands it.  time_to_inject() lets the
 * fault-injection framework force this failure path.
 * Returns 0 on success or -EFSCORRUPTED (and flags the fs for fsck).
 */
static int sanity_check_node_footer(struct f2fs_sb_info *sbi,
					struct page *page, pgoff_t nid,
					enum node_type ntype)
{
	if (unlikely(nid != nid_of_node(page) ||
			(ntype == NODE_TYPE_INODE && !IS_INODE(page)) ||
			(ntype == NODE_TYPE_XATTR &&
			!f2fs_has_xattr_block(ofs_of_node(page))) ||
			time_to_inject(sbi, FAULT_INCONSISTENT_FOOTER))) {
		f2fs_warn(sbi, "inconsistent node block, node_type:%d, nid:%lu, "
			  "node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  ntype, nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
		return -EFSCORRUPTED;
	}
	return 0;
}
14791cf6b567SChao Yu
/*
 * Look up (and read, if needed) the node block @nid from the node
 * address space.  @parent/@start, when given, trigger readahead of the
 * sibling nids.  @ntype selects the footer sanity check applied.
 * Returns the locked, verified folio, or an ERR_PTR.
 */
static struct folio *__get_node_folio(struct f2fs_sb_info *sbi, pgoff_t nid,
			struct page *parent, int start,
			enum node_type ntype)
{
	struct folio *folio;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	folio = f2fs_grab_cache_folio(NODE_MAPPING(sbi), nid, false);
	if (IS_ERR(folio))
		return folio;

	err = read_node_page(&folio->page, 0);
	if (err < 0) {
		goto out_put_err;
	} else if (err == LOCKED_PAGE) {
		/* already uptodate and still locked: skip the re-lock dance */
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	/* wait for the read to finish; bio completion unlocks the folio */
	folio_lock(folio);

	/* folio may have been truncated from the mapping while unlocked */
	if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
		f2fs_folio_put(folio, true);
		goto repeat;
	}

	if (unlikely(!folio_test_uptodate(folio))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, &folio->page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	err = sanity_check_node_footer(sbi, &folio->page, nid, ntype);
	if (!err)
		return folio;
out_err:
	folio_clear_uptodate(folio);
out_put_err:
	/* ENOENT comes from read_node_page which is not an error. */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, folio, NODE);
	f2fs_folio_put(folio, true);
	return ERR_PTR(err);
}
1536e05df3b1SJaegeuk Kim
/*
 * Page-based wrapper for fetching a regular node block.
 * An ERR_PTR from __get_node_folio() is passed through unchanged
 * (page presumably sits at folio offset 0 — standard folio layout).
 */
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return &__get_node_folio(sbi, nid, NULL, 0, NODE_TYPE_REGULAR)->page;
}
15441cf6b567SChao Yu
/*
 * Fetch the inode block of @ino; NODE_TYPE_INODE makes the footer
 * check verify that the block really is an inode.
 */
struct folio *f2fs_get_inode_folio(struct f2fs_sb_info *sbi, pgoff_t ino)
{
	const enum node_type type = NODE_TYPE_INODE;

	return __get_node_folio(sbi, ino, NULL, 0, type);
}
15491cf6b567SChao Yu
/* Page-based wrapper around f2fs_get_inode_folio(); ERR_PTRs pass through. */
struct page *f2fs_get_inode_page(struct f2fs_sb_info *sbi, pgoff_t ino)
{
	return &f2fs_get_inode_folio(sbi, ino)->page;
}
15560e022ea8SChao Yu
/*
 * Fetch the xattr node block @xnid; NODE_TYPE_XATTR makes the footer
 * check verify that the block has an xattr node offset.
 */
struct page *f2fs_get_xnode_page(struct f2fs_sb_info *sbi, pgoff_t xnid)
{
	return &__get_node_folio(sbi, xnid, NULL, 0, NODE_TYPE_XATTR)->page;
}
15642aac2538SChao Yu
f2fs_get_node_page_ra(struct page * parent,int start)15654d57b86dSChao Yu struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1566e05df3b1SJaegeuk Kim {
15674081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
15680e022ea8SChao Yu nid_t nid = get_nid(parent, start, false);
15691cf6b567SChao Yu struct folio *folio = __get_node_folio(sbi, nid, parent, start,
15701cf6b567SChao Yu NODE_TYPE_REGULAR);
1571e05df3b1SJaegeuk Kim
15724d417ae2SMatthew Wilcox (Oracle) return &folio->page;
1573e05df3b1SJaegeuk Kim }
1574e05df3b1SJaegeuk Kim
/*
 * Best-effort write-back of an inode's dirty inline-data page (index 0)
 * into its inode block.  Any failure to find or lock the page simply
 * bails out; on write failure the page is re-dirtied for a later retry.
 */
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	/* FGP_NOWAIT: never block here; skip if the page is contended */
	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page_folio(page));
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		/* write failed: restore the dirty bit so it is retried */
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}
16102049d4fcSJaegeuk Kim
/*
 * Scan all dirty node folios and return a reference to the LAST dirty
 * direct/cold node folio belonging to @ino (the fsync chain's tail).
 * Returns NULL when none is dirty, or ERR_PTR(-EIO) on checkpoint error.
 * The returned folio is referenced but not locked.
 */
static struct folio *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct folio_batch fbatch;
	struct folio *last_folio = NULL;
	int nr_folios;

	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_folio_put(last_folio, false);
				folio_batch_release(&fbatch);
				return ERR_PTR(-EIO);
			}

			/* only cold dnodes are part of an fsync chain */
			if (!IS_DNODE(&folio->page) || !is_cold_node(&folio->page))
				continue;
			if (ino_of_node(&folio->page) != ino)
				continue;

			folio_lock(folio);

			/* re-check under lock: folio may have been truncated */
			if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				folio_unlock(folio);
				continue;
			}
			if (ino_of_node(&folio->page) != ino)
				goto continue_unlock;

			if (!folio_test_dirty(folio)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* a later candidate replaces the previous one */
			if (last_folio)
				f2fs_folio_put(last_folio, false);

			folio_get(folio);
			last_folio = folio;
			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return last_folio;
}
1667608514deSJaegeuk Kim
/*
 * Write one dirty node page to a newly allocated block address.
 * @atomic:	page ends an fsync chain; use PREFLUSH/FUA unless nobarrier.
 * @submitted:	out, set to fio.submitted when a bio could be issued.
 * @do_balance:	run f2fs_balance_fs() after the write.
 * @seq_id:	out, receives the fsync-node sequence number when the page
 *		is tracked on the fsync node list.
 * Returns 0, or AOP_WRITEPAGE_ACTIVATE after re-dirtying the page.
 */
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct folio *folio = page_folio(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(folio, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* keep node pages in remount-ro mode */
		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
			goto redirty_out;
		/* otherwise drop the dirty page on the floor */
		folio_clear_uptodate(folio);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		folio_unlock(folio);
		return 0;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	/* defer cold dnodes during background writeback when checkpoint is on */
	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, folio->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	/* in reclaim context, never block on the node_write rwsem */
	if (wbc->for_reclaim) {
		if (!f2fs_down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		f2fs_down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		folio_clear_uptodate(folio);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read(&sbi->node_write);
		folio_unlock(folio);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, folio)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	folio_start_writeback(folio);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		/* merged write already forced out; don't report submission */
		submitted = NULL;
	}

	folio_unlock(folio);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	folio_redirty_for_writepage(wbc, folio);
	return AOP_WRITEPAGE_ACTIVATE;
}
1780faa24895SJaegeuk Kim
/*
 * Move a node page for garbage collection.  Foreground GC writes the
 * page out synchronously; background GC only marks it dirty so normal
 * writeback relocates it.  Consumes the caller's page lock and drops
 * one page reference on all paths.
 * Returns 0 on success, -EAGAIN when the write could not be issued.
 */
int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		/* __write_node_page() unlocks the page on success */
		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!folio_test_writeback(page_folio(node_page)))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}
1818f15194fcSYunlei He
/*
 * Write out the dirty (cold) dnodes of @inode for fsync.  In @atomic
 * mode the last dnode of the chain carries the fsync mark and FUA
 * semantics; if someone re-dirties pages under us, the mark write is
 * retried until it sticks.  @seq_id reports the last fsync-node
 * sequence number.  Returns 0 or -EIO.
 */
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int ret = 0;
	struct folio *last_folio = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_folios;
	int nwritten = 0;

	if (atomic) {
		/* find the chain's tail: only it gets the fsync mark */
		last_folio = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_folio))
			return PTR_ERR_OR_ZERO(last_folio);
	}
retry:
	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
				(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
				&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_folio_put(last_folio, false);
				folio_batch_release(&fbatch);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(&folio->page) || !is_cold_node(&folio->page))
				continue;
			if (ino_of_node(&folio->page) != ino)
				continue;

			folio_lock(folio);

			/* re-check under lock: folio may have been truncated */
			if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				folio_unlock(folio);
				continue;
			}
			if (ino_of_node(&folio->page) != ino)
				goto continue_unlock;

			if (!folio_test_dirty(folio) && folio != last_folio) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_folio_wait_writeback(folio, NODE, true, true);

			set_fsync_mark(&folio->page, 0);
			set_dentry_mark(&folio->page, 0);

			/* non-atomic: mark every dnode; atomic: only the tail */
			if (!atomic || folio == last_folio) {
				set_fsync_mark(&folio->page, 1);
				percpu_counter_inc(&sbi->rf_node_block_count);
				if (IS_INODE(&folio->page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, &folio->page);
					set_dentry_mark(&folio->page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!folio_test_dirty(folio))
					folio_mark_dirty(folio);
			}

			if (!folio_clear_dirty_for_io(folio))
				goto continue_unlock;

			ret = __write_node_page(&folio->page, atomic &&
						folio == last_folio,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				folio_unlock(folio);
				f2fs_folio_put(last_folio, false);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (folio == last_folio) {
				/* drop the reference taken by last_fsync_dnode() */
				f2fs_folio_put(folio, false);
				marked = true;
				break;
			}
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		/* the tail was written unmarked; re-dirty it and try again */
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_folio->index);
		folio_lock(last_folio);
		f2fs_folio_wait_writeback(last_folio, NODE, true, true);
		folio_mark_dirty(last_folio);
		folio_unlock(last_folio);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}
193852681375SJaegeuk Kim
f2fs_match_ino(struct inode * inode,unsigned long ino,void * data)1939052a82d8SChao Yu static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
1940052a82d8SChao Yu {
1941052a82d8SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1942052a82d8SChao Yu bool clean;
1943052a82d8SChao Yu
1944052a82d8SChao Yu if (inode->i_ino != ino)
1945052a82d8SChao Yu return 0;
1946052a82d8SChao Yu
1947052a82d8SChao Yu if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
1948052a82d8SChao Yu return 0;
1949052a82d8SChao Yu
1950052a82d8SChao Yu spin_lock(&sbi->inode_lock[DIRTY_META]);
1951052a82d8SChao Yu clean = list_empty(&F2FS_I(inode)->gdirty_list);
1952052a82d8SChao Yu spin_unlock(&sbi->inode_lock[DIRTY_META]);
1953052a82d8SChao Yu
1954052a82d8SChao Yu if (clean)
1955052a82d8SChao Yu return 0;
1956052a82d8SChao Yu
1957052a82d8SChao Yu inode = igrab(inode);
1958052a82d8SChao Yu if (!inode)
1959052a82d8SChao Yu return 0;
1960052a82d8SChao Yu return 1;
1961052a82d8SChao Yu }
1962052a82d8SChao Yu
flush_dirty_inode(struct folio * folio)1963de90f761SMatthew Wilcox (Oracle) static bool flush_dirty_inode(struct folio *folio)
1964052a82d8SChao Yu {
1965de90f761SMatthew Wilcox (Oracle) struct f2fs_sb_info *sbi = F2FS_F_SB(folio);
1966052a82d8SChao Yu struct inode *inode;
1967de90f761SMatthew Wilcox (Oracle) nid_t ino = ino_of_node(&folio->page);
1968052a82d8SChao Yu
1969052a82d8SChao Yu inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
1970052a82d8SChao Yu if (!inode)
1971052a82d8SChao Yu return false;
1972052a82d8SChao Yu
1973de90f761SMatthew Wilcox (Oracle) f2fs_update_inode(inode, &folio->page);
1974de90f761SMatthew Wilcox (Oracle) folio_unlock(folio);
1975052a82d8SChao Yu
1976052a82d8SChao Yu iput(inode);
1977052a82d8SChao Yu return true;
1978052a82d8SChao Yu }
1979052a82d8SChao Yu
f2fs_flush_inline_data(struct f2fs_sb_info * sbi)198068e79bafSJia Yang void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
198134c061adSSayali Lokhande {
198234c061adSSayali Lokhande pgoff_t index = 0;
1983a40a4ad1SVishal Moola (Oracle) struct folio_batch fbatch;
1984a40a4ad1SVishal Moola (Oracle) int nr_folios;
198534c061adSSayali Lokhande
1986a40a4ad1SVishal Moola (Oracle) folio_batch_init(&fbatch);
198734c061adSSayali Lokhande
1988a40a4ad1SVishal Moola (Oracle) while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
1989a40a4ad1SVishal Moola (Oracle) (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
1990a40a4ad1SVishal Moola (Oracle) &fbatch))) {
199134c061adSSayali Lokhande int i;
199234c061adSSayali Lokhande
1993a40a4ad1SVishal Moola (Oracle) for (i = 0; i < nr_folios; i++) {
1994015d9c56SMatthew Wilcox (Oracle) struct folio *folio = fbatch.folios[i];
199534c061adSSayali Lokhande
1996015d9c56SMatthew Wilcox (Oracle) if (!IS_INODE(&folio->page))
199734c061adSSayali Lokhande continue;
199834c061adSSayali Lokhande
1999015d9c56SMatthew Wilcox (Oracle) folio_lock(folio);
200034c061adSSayali Lokhande
2001015d9c56SMatthew Wilcox (Oracle) if (unlikely(folio->mapping != NODE_MAPPING(sbi)))
2002015d9c56SMatthew Wilcox (Oracle) goto unlock;
2003015d9c56SMatthew Wilcox (Oracle) if (!folio_test_dirty(folio))
2004015d9c56SMatthew Wilcox (Oracle) goto unlock;
200534c061adSSayali Lokhande
200634c061adSSayali Lokhande /* flush inline_data, if it's async context. */
2007015d9c56SMatthew Wilcox (Oracle) if (page_private_inline(&folio->page)) {
2008015d9c56SMatthew Wilcox (Oracle) clear_page_private_inline(&folio->page);
2009015d9c56SMatthew Wilcox (Oracle) folio_unlock(folio);
2010015d9c56SMatthew Wilcox (Oracle) flush_inline_data(sbi, ino_of_node(&folio->page));
201134c061adSSayali Lokhande continue;
201234c061adSSayali Lokhande }
2013015d9c56SMatthew Wilcox (Oracle) unlock:
2014015d9c56SMatthew Wilcox (Oracle) folio_unlock(folio);
201534c061adSSayali Lokhande }
2016a40a4ad1SVishal Moola (Oracle) folio_batch_release(&fbatch);
201734c061adSSayali Lokhande cond_resched();
201834c061adSSayali Lokhande }
201934c061adSSayali Lokhande }
202034c061adSSayali Lokhande
/*
 * Write back dirty node pages of the whole node address space.
 *
 * Pages are flushed in three passes (see the in-loop comment): step 0
 * writes indirect nodes, step 1 dentry dnodes, step 2 file (cold)
 * dnodes.
 *
 * @wbc:	writeback control handed down from the caller/flusher.
 * @do_balance:	when true, inline data and dirty inodes discovered on the
 *		way are flushed before the node page itself is written;
 *		also forwarded to __write_node_page().
 * @io_type:	iostat accounting type for the submitted IO.
 *
 * Returns 0 on success, -EIO when a checkpoint error is detected, or the
 * last error from __write_node_page().
 */
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			struct writeback_control *wbc,
			bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_folios, done = 0;

	folio_batch_init(&fbatch);

next_step:
	index = 0;

	while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
				&index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
				&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];
			bool submitted = false;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(&folio->page))
				continue;
			if (step == 1 && (!IS_DNODE(&folio->page) ||
						is_cold_node(&folio->page)))
				continue;
			if (step == 2 && (!IS_DNODE(&folio->page) ||
						!is_cold_node(&folio->page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				folio_lock(folio);
			else if (!folio_trylock(folio))
				continue;

			if (unlikely(folio->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				folio_unlock(folio);
				continue;
			}

			if (!folio_test_dirty(folio)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (page_private_inline(&folio->page)) {
				clear_page_private_inline(&folio->page);
				folio_unlock(folio);
				flush_inline_data(sbi, ino_of_node(&folio->page));
				/* the lock was dropped; re-validate the folio */
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(&folio->page) && flush_dirty_inode(folio))
				goto lock_node;
write_node:
			f2fs_folio_wait_writeback(folio, NODE, true, true);

			if (!folio_clear_dirty_for_io(folio))
				goto continue_unlock;

			set_fsync_mark(&folio->page, 0);
			set_dentry_mark(&folio->page, 0);

			ret = __write_node_page(&folio->page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				folio_unlock(folio);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}
2142e05df3b1SJaegeuk Kim
/*
 * Wait for writeback completion of node pages queued on
 * sbi->fsync_node_list, up to and including sequence id @seq_id.
 *
 * Each page is pinned with get_page() before the spinlock is dropped so
 * it cannot be freed while we sleep in f2fs_wait_on_page_writeback().
 *
 * Returns any error recorded on the node mapping
 * (filemap_check_errors()).
 */
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			/* this entry belongs to a later fsync; stop here */
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);

		put_page(page);
	}

	return filemap_check_errors(NODE_MAPPING(sbi));
}
2175cfe58f9dSJaegeuk Kim
/*
 * ->writepages() for the node mapping: write out dirty node pages.
 *
 * WB_SYNC_NONE writeback is skipped while SBI_POR_DOING is set, while the
 * number of dirty node pages is still below the skip threshold, or while
 * a WB_SYNC_ALL writer is running (wb_sync_req[NODE] is elevated).
 */
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE])) {
		/* to avoid potential deadlock */
		if (current->plug)
			blk_finish_plug(current->plug);
		goto skip_write;
	}

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	/* write up to the quota computed by nr_pages_to_write() */
	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}
2221e05df3b1SJaegeuk Kim
f2fs_dirty_node_folio(struct address_space * mapping,struct folio * folio)2222cbc975b1SMatthew Wilcox (Oracle) static bool f2fs_dirty_node_folio(struct address_space *mapping,
2223cbc975b1SMatthew Wilcox (Oracle) struct folio *folio)
2224e05df3b1SJaegeuk Kim {
222592f750d8SChao Yu trace_f2fs_set_page_dirty(folio, NODE);
222626c6b887SJaegeuk Kim
2227cbc975b1SMatthew Wilcox (Oracle) if (!folio_test_uptodate(folio))
2228cbc975b1SMatthew Wilcox (Oracle) folio_mark_uptodate(folio);
222954c55c4eSWeichao Guo #ifdef CONFIG_F2FS_CHECK_FS
2230cbc975b1SMatthew Wilcox (Oracle) if (IS_INODE(&folio->page))
223129c87793SMatthew Wilcox (Oracle) f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
223254c55c4eSWeichao Guo #endif
22339b7eadd9SShuqi Zhang if (filemap_dirty_folio(mapping, folio)) {
223429c87793SMatthew Wilcox (Oracle) inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
2235cbc975b1SMatthew Wilcox (Oracle) set_page_private_reference(&folio->page);
2236cbc975b1SMatthew Wilcox (Oracle) return true;
2237e05df3b1SJaegeuk Kim }
2238cbc975b1SMatthew Wilcox (Oracle) return false;
2239e05df3b1SJaegeuk Kim }
2240e05df3b1SJaegeuk Kim
/*
 * Structure of the f2fs node operations: address_space operations
 * backing the node page cache.
 */
const struct address_space_operations f2fs_node_aops = {
	.writepages	= f2fs_write_node_pages,
	.dirty_folio	= f2fs_dirty_node_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.release_folio	= f2fs_release_folio,
	.migrate_folio	= filemap_migrate_folio,
};
2251e05df3b1SJaegeuk Kim
/* Look up a cached free_nid entry by nid in the free-nid radix tree. */
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	struct free_nid *fnid;

	fnid = radix_tree_lookup(&nm_i->free_nid_root, n);
	return fnid;
}
2257e05df3b1SJaegeuk Kim
__insert_free_nid(struct f2fs_sb_info * sbi,struct free_nid * i)22589a4ffdf5SChao Yu static int __insert_free_nid(struct f2fs_sb_info *sbi,
2259b815bdc7SLiu Song struct free_nid *i)
2260e05df3b1SJaegeuk Kim {
2261b8559dc2SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
2262eb0aa4b8SJaegeuk Kim int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
22635f029c04SYi Zhuang
2264eb0aa4b8SJaegeuk Kim if (err)
2265eb0aa4b8SJaegeuk Kim return err;
2266eb0aa4b8SJaegeuk Kim
2267b815bdc7SLiu Song nm_i->nid_cnt[FREE_NID]++;
22689a4ffdf5SChao Yu list_add_tail(&i->list, &nm_i->free_nid_list);
2269eb0aa4b8SJaegeuk Kim return 0;
2270b8559dc2SChao Yu }
2271b8559dc2SChao Yu
__remove_free_nid(struct f2fs_sb_info * sbi,struct free_nid * i,enum nid_state state)22729a4ffdf5SChao Yu static void __remove_free_nid(struct f2fs_sb_info *sbi,
2273a0761f63SFan Li struct free_nid *i, enum nid_state state)
2274b8559dc2SChao Yu {
2275b8559dc2SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
2276b8559dc2SChao Yu
22779a4ffdf5SChao Yu f2fs_bug_on(sbi, state != i->state);
22789a4ffdf5SChao Yu nm_i->nid_cnt[state]--;
22799a4ffdf5SChao Yu if (state == FREE_NID)
2280e05df3b1SJaegeuk Kim list_del(&i->list);
22818a7ed66aSJaegeuk Kim radix_tree_delete(&nm_i->free_nid_root, i->nid);
2282e05df3b1SJaegeuk Kim }
2283e05df3b1SJaegeuk Kim
__move_free_nid(struct f2fs_sb_info * sbi,struct free_nid * i,enum nid_state org_state,enum nid_state dst_state)2284a0761f63SFan Li static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2285a0761f63SFan Li enum nid_state org_state, enum nid_state dst_state)
2286a0761f63SFan Li {
2287a0761f63SFan Li struct f2fs_nm_info *nm_i = NM_I(sbi);
2288a0761f63SFan Li
2289a0761f63SFan Li f2fs_bug_on(sbi, org_state != i->state);
2290a0761f63SFan Li i->state = dst_state;
2291a0761f63SFan Li nm_i->nid_cnt[org_state]--;
2292a0761f63SFan Li nm_i->nid_cnt[dst_state]++;
2293a0761f63SFan Li
2294a0761f63SFan Li switch (dst_state) {
2295a0761f63SFan Li case PREALLOC_NID:
2296a0761f63SFan Li list_del(&i->list);
2297a0761f63SFan Li break;
2298a0761f63SFan Li case FREE_NID:
2299a0761f63SFan Li list_add_tail(&i->list, &nm_i->free_nid_list);
2300a0761f63SFan Li break;
2301a0761f63SFan Li default:
2302a0761f63SFan Li BUG_ON(1);
2303a0761f63SFan Li }
2304a0761f63SFan Li }
2305a0761f63SFan Li
/*
 * Record nid @nid as free (@set == true) or in-use (@set == false) in the
 * per-NAT-block free-nid bitmap, keeping free_nid_count in sync.  No-op
 * when the covering NAT block has not been scanned yet (bit not set in
 * nat_block_bitmap) or when the bit already has the requested value.
 */
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);
	bool was_free;

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	was_free = test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
	if (set == was_free)
		return;

	if (set) {
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}
23295921aaa1SLiFan
/*
 * Try to add @nid to the FREE_NID cache.
 *
 * @build:  true when called from a scanning path; enables the duplicate/
 *          race checks against the NAT cache and existing free-nid
 *          entries (see the scenario comment below).
 * @update: when true, also reflect the result in the free-nid bitmap
 *          and, for non-build callers, bump available_nids.
 *
 * Returns true if the nid is recognized as free (including when it was
 * already cached in FREE_NID state), false otherwise.
 */
static bool add_free_nid(struct f2fs_sb_info *sbi,
			nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 * Thread A		Thread B
		 * - f2fs_create
		 *  - f2fs_new_inode
		 *   - f2fs_alloc_nid
		 *    - __insert_nid_to_list(PREALLOC_NID)
		 *			- f2fs_balance_fs_bg
		 *			 - f2fs_build_free_nids
		 *			  - __f2fs_build_free_nids
		 *			   - scan_nat_page
		 *			    - add_free_nid
		 *			     - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *   - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}
2404e05df3b1SJaegeuk Kim
/*
 * Drop @nid from the free-nid cache if it is present in FREE_NID state;
 * entries in PREALLOC_NID state are left alone.  The entry is freed
 * outside the lock.
 */
static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i) {
		if (i->state == FREE_NID)
			__remove_free_nid(sbi, i, FREE_NID);
		else
			i = NULL;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (i)
		kmem_cache_free(free_nid_slab, i);
}
2422e05df3b1SJaegeuk Kim
/*
 * Scan one on-disk NAT block and record which nids in it are free.
 * Returns -EFSCORRUPTED when an entry still carries NEW_ADDR
 * (inconsistent NAT), 0 otherwise.
 */
static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	unsigned int ofs;
	nid_t nid;

	/* mark this NAT block as scanned */
	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	nid = start_nid;
	for (ofs = start_nid % NAT_ENTRY_PER_BLOCK;
	     ofs < NAT_ENTRY_PER_BLOCK && nid < nm_i->max_nid;
	     ofs++, nid++) {
		block_t blk_addr =
			le32_to_cpu(nat_blk->entries[ofs].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EFSCORRUPTED;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, nid, true, true);
		} else {
			spin_lock(&nm_i->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, true);
			spin_unlock(&nm_i->nid_list_lock);
		}
	}

	return 0;
}
2456e05df3b1SJaegeuk Kim
scan_curseg_cache(struct f2fs_sb_info * sbi)24572fbaa25fSChao Yu static void scan_curseg_cache(struct f2fs_sb_info *sbi)
24584ac91242SChao Yu {
24594ac91242SChao Yu struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
24604ac91242SChao Yu struct f2fs_journal *journal = curseg->journal;
24612fbaa25fSChao Yu int i;
24624ac91242SChao Yu
24634ac91242SChao Yu down_read(&curseg->journal_rwsem);
24644ac91242SChao Yu for (i = 0; i < nats_in_cursum(journal); i++) {
24654ac91242SChao Yu block_t addr;
24664ac91242SChao Yu nid_t nid;
24674ac91242SChao Yu
24684ac91242SChao Yu addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
24694ac91242SChao Yu nid = le32_to_cpu(nid_in_journal(journal, i));
24704ac91242SChao Yu if (addr == NULL_ADDR)
24715921aaa1SLiFan add_free_nid(sbi, nid, true, false);
24724ac91242SChao Yu else
24734ac91242SChao Yu remove_free_nid(sbi, nid);
24744ac91242SChao Yu }
24754ac91242SChao Yu up_read(&curseg->journal_rwsem);
24762fbaa25fSChao Yu }
24772fbaa25fSChao Yu
/*
 * Harvest free nids from the in-memory free_nid_bitmap (populated by
 * scan_nat_page()) without touching on-disk NAT pages, then fold in the
 * NAT journal via scan_curseg_cache().  Stops early once MAX_FREE_NIDS
 * entries are cached.
 */
static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	f2fs_down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		/* only NAT blocks already scanned have a valid bitmap */
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			/* jump straight to the next set (free) bit */
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	f2fs_up_read(&nm_i->nat_tree_lock);
}
25094ac91242SChao Yu
/*
 * Refill the FREE_NID cache, scanning up to FREE_NID_PAGES NAT pages
 * starting at nm_i->next_scan_nid, plus the free-nid bitmap and the NAT
 * journal.
 *
 * @sync:  when true, scan even if f2fs_available_free_memory() reports
 *         the free-nid cache is already large enough.
 * @mount: true only at mount time; skips the free_nid_bitmap shortcut.
 *
 * Callers serialize through nm_i->build_lock (see f2fs_build_free_nids()).
 * Returns 0 on success or a negative errno (e.g. -EFSCORRUPTED for an
 * inconsistent NAT block).
 */
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	/* always start scanning at a NAT block boundary */
	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	f2fs_down_read(&nm_i->nat_tree_lock);

	while (1) {
		/* skip NAT blocks whose free nids were already collected */
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
				nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				f2fs_up_read(&nm_i->nat_tree_lock);

				if (ret == -EFSCORRUPTED) {
					f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
					set_sbi_flag(sbi, SBI_NEED_FSCK);
					f2fs_handle_error(sbi,
						ERROR_INCONSISTENT_NAT);
				}

				return ret;
			}
		}

		/* advance to the next NAT block, wrapping at max_nid */
		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	f2fs_up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}
2591e05df3b1SJaegeuk Kim
/*
 * Serialized entry point for free-nid building: takes build_lock and
 * delegates to __f2fs_build_free_nids().
 */
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int err;

	mutex_lock(&nm_i->build_lock);
	err = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&nm_i->build_lock);

	return err;
}
26022411cf5bSChao Yu
/*
 * Allocate a free nid.
 *
 * If this function returns true, the caller obtains a new nid via *@nid
 * and should later settle the resulting PREALLOC_NID entry through
 * f2fs_alloc_nid_done() or f2fs_alloc_nid_failed().
 * The returned nid could be used as ino as well as nid when inode is
 * created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	/* fault injection hook: simulate allocation failure */
	if (time_to_inject(sbi, FAULT_ALLOC_NID))
		return false;

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}
2645e05df3b1SJaegeuk Kim
26460a8165d7SJaegeuk Kim /*
26474d57b86dSChao Yu * f2fs_alloc_nid() should be called prior to this function.
2648e05df3b1SJaegeuk Kim */
f2fs_alloc_nid_done(struct f2fs_sb_info * sbi,nid_t nid)26494d57b86dSChao Yu void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2650e05df3b1SJaegeuk Kim {
2651e05df3b1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
2652e05df3b1SJaegeuk Kim struct free_nid *i;
2653e05df3b1SJaegeuk Kim
2654b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
26558a7ed66aSJaegeuk Kim i = __lookup_free_nid_list(nm_i, nid);
2656b8559dc2SChao Yu f2fs_bug_on(sbi, !i);
2657a0761f63SFan Li __remove_free_nid(sbi, i, PREALLOC_NID);
2658b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2659cf0ee0f0SChao Yu
2660cf0ee0f0SChao Yu kmem_cache_free(free_nid_slab, i);
2661e05df3b1SJaegeuk Kim }
2662e05df3b1SJaegeuk Kim
26630a8165d7SJaegeuk Kim /*
26644d57b86dSChao Yu * f2fs_alloc_nid() should be called prior to this function.
2665e05df3b1SJaegeuk Kim */
f2fs_alloc_nid_failed(struct f2fs_sb_info * sbi,nid_t nid)26664d57b86dSChao Yu void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2667e05df3b1SJaegeuk Kim {
266849952fa1SJaegeuk Kim struct f2fs_nm_info *nm_i = NM_I(sbi);
266949952fa1SJaegeuk Kim struct free_nid *i;
2670cf0ee0f0SChao Yu bool need_free = false;
267149952fa1SJaegeuk Kim
267265985d93SJaegeuk Kim if (!nid)
267365985d93SJaegeuk Kim return;
267465985d93SJaegeuk Kim
2675b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
26768a7ed66aSJaegeuk Kim i = __lookup_free_nid_list(nm_i, nid);
2677b8559dc2SChao Yu f2fs_bug_on(sbi, !i);
2678b8559dc2SChao Yu
26794d57b86dSChao Yu if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2680a0761f63SFan Li __remove_free_nid(sbi, i, PREALLOC_NID);
2681cf0ee0f0SChao Yu need_free = true;
268295630cbaSHaicheng Li } else {
2683a0761f63SFan Li __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
268495630cbaSHaicheng Li }
268504d47e67SChao Yu
268604d47e67SChao Yu nm_i->available_nids++;
268704d47e67SChao Yu
2688346fe752SChao Yu update_free_nid_bitmap(sbi, nid, true, false);
26894ac91242SChao Yu
2690b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2691cf0ee0f0SChao Yu
2692cf0ee0f0SChao Yu if (need_free)
2693cf0ee0f0SChao Yu kmem_cache_free(free_nid_slab, i);
2694e05df3b1SJaegeuk Kim }
2695e05df3b1SJaegeuk Kim
f2fs_try_to_free_nids(struct f2fs_sb_info * sbi,int nr_shrink)26964d57b86dSChao Yu int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
269731696580SChao Yu {
269831696580SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
269931696580SChao Yu int nr = nr_shrink;
270031696580SChao Yu
27019a4ffdf5SChao Yu if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2702ad4edb83SJaegeuk Kim return 0;
2703ad4edb83SJaegeuk Kim
270431696580SChao Yu if (!mutex_trylock(&nm_i->build_lock))
270531696580SChao Yu return 0;
270631696580SChao Yu
2707042be373SChao Yu while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
2708042be373SChao Yu struct free_nid *i, *next;
2709042be373SChao Yu unsigned int batch = SHRINK_NID_BATCH_SIZE;
2710042be373SChao Yu
2711b8559dc2SChao Yu spin_lock(&nm_i->nid_list_lock);
27129a4ffdf5SChao Yu list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2713042be373SChao Yu if (!nr_shrink || !batch ||
27149a4ffdf5SChao Yu nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
271531696580SChao Yu break;
2716a0761f63SFan Li __remove_free_nid(sbi, i, FREE_NID);
271731696580SChao Yu kmem_cache_free(free_nid_slab, i);
271831696580SChao Yu nr_shrink--;
2719042be373SChao Yu batch--;
272031696580SChao Yu }
2721b8559dc2SChao Yu spin_unlock(&nm_i->nid_list_lock);
2722042be373SChao Yu }
2723042be373SChao Yu
272431696580SChao Yu mutex_unlock(&nm_i->build_lock);
272531696580SChao Yu
272631696580SChao Yu return nr - nr_shrink;
272731696580SChao Yu }
272831696580SChao Yu
/*
 * During roll-forward recovery, sync the inode's inline xattr state with
 * the fsync'ed node @page: copy the inline xattr payload into the inode
 * page, or clear the inline xattr flag when the logged inode no longer
 * carries one. Returns 0 on success or a negative errno from loading
 * the inode page.
 */
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_inode_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		/* nothing to copy; just rewrite the inode page below */
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	/* make sure no writeback is in flight before touching the page */
	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
276528cdce04SChao Yu
/*
 * Rebuild an inode's xattr node during roll-forward recovery:
 *  1) invalidate the stale pre-crash xattr node block (if any),
 *  2) allocate a fresh nid plus a new xattr node page for the inode,
 *  3) if the logged node @page is provided, copy its xattr payload into
 *     the new node and mark it dirty.
 * Returns 0 on success or a negative errno.
 */
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr, 1);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		/* give the reserved nid back on failure */
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	if (page) {
		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
				VALID_XATTR_BLOCK_SIZE);
		set_page_dirty(xpage);
	}
	f2fs_put_page(xpage, 1);

	return 0;
}
2813abb2366cSJaegeuk Kim
/*
 * Recreate an inode page in the node address space from the fsync'ed
 * node @page found during roll-forward recovery. Only the header fields
 * (up to i_ext) plus selected extra attributes are copied; size, block
 * count, link count and xattr nid are reset to a fresh-inode state.
 * Returns 0 on success, -EINVAL if the inode already has a block
 * address, or a negative errno from the NAT lookup.
 */
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
	if (err)
		return err;

	/* only a not-yet-allocated inode may be recovered this way */
	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		/* page cache allocation failed; back off and retry */
		memalloc_retry_wait(GFP_NOFS);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	/* copy the header; per-inode counters are reset below */
	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		/* copy optional fields only when they fit in extra area */
		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
2884e05df3b1SJaegeuk Kim
/*
 * Rebuild the node summary block for segment @segno by reading every
 * node block in the segment and recording each block's owning nid in
 * @sum. Returns 0 on success or a negative errno from reading a page.
 */
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = BLKS_PER_SEG(sbi);
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		/* process at most one bio worth of blocks per pass */
		nrpages = bio_max_segs(last_offset - i);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			/* version/offset are unknown here; reset them */
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		/* drop the temporary meta pages read above */
		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}
2923e05df3b1SJaegeuk Kim
/*
 * Merge every NAT entry cached in the hot-data curseg journal back into
 * the in-memory NAT cache as a dirty entry, then empty the journal.
 * Called when the journal lacks space (or when nat_bits is enabled at
 * checkpoint time) so the entries are flushed via NAT blocks instead.
 */
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		/* skip corrupted journal entries instead of crashing */
		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(sbi, nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in journal has not been used after last
		 * checkpoint, we should remove it from available nids,
		 * since later we will add it again.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	/* i entries consumed; shrink the journal accordingly */
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}
2965e05df3b1SJaegeuk Kim
__adjust_nat_entry_set(struct nat_entry_set * nes,struct list_head * head,int max)2966309cc2b6SJaegeuk Kim static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2967309cc2b6SJaegeuk Kim struct list_head *head, int max)
2968e05df3b1SJaegeuk Kim {
2969309cc2b6SJaegeuk Kim struct nat_entry_set *cur;
2970e05df3b1SJaegeuk Kim
2971309cc2b6SJaegeuk Kim if (nes->entry_cnt >= max)
2972309cc2b6SJaegeuk Kim goto add_out;
2973e05df3b1SJaegeuk Kim
2974309cc2b6SJaegeuk Kim list_for_each_entry(cur, head, set_list) {
2975309cc2b6SJaegeuk Kim if (cur->entry_cnt >= nes->entry_cnt) {
2976309cc2b6SJaegeuk Kim list_add(&nes->set_list, cur->set_list.prev);
2977309cc2b6SJaegeuk Kim return;
2978309cc2b6SJaegeuk Kim }
2979309cc2b6SJaegeuk Kim }
2980309cc2b6SJaegeuk Kim add_out:
2981309cc2b6SJaegeuk Kim list_add_tail(&nes->set_list, head);
2982aec71382SChao Yu }
2983aec71382SChao Yu
/*
 * Refresh the full/empty nat_bits for the NAT block starting at
 * @start_nid according to the block contents in @page. No-op unless
 * nat_bits tracking is enabled.
 */
static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid;
	int ent;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	/* entry 0 of block 0 is reserved and always counts as valid */
	valid = (nat_index == 0) ? 1 : 0;
	for (ent = valid; ent < NAT_ENTRY_PER_BLOCK; ent++) {
		if (le32_to_cpu(nat_blk->entries[ent].block_addr) != NULL_ADDR)
			valid++;
	}

	if (valid == 0) {
		__set_bit_le(nat_index, nm_i->empty_nat_bits);
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_index, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
}
301622ad0b6aSJaegeuk Kim
/*
 * Flush one dirty NAT entry set to stable storage: either into the
 * hot-data curseg journal (when it has room and nat_bits is disabled)
 * or into the set's on-disk NAT block. The set is freed once emptied.
 * Called under nat_tree_lock. Returns 0 or a negative errno from
 * get_next_nat_page().
 */
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		/* NEW_ADDR entries must never reach the flush path */
		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			/* the nid became free again; cache it for reuse */
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		/* keep the full/empty nat_bits in sync with this block */
		__update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}
3091aec71382SChao Yu
/*
 * This function is called during the checkpointing process.
 * It flushes all dirty NAT entries: journal entries are merged back
 * into the cache when needed, dirty sets are gathered sorted by size,
 * and each set is flushed to the journal or its NAT block.
 * Returns 0 or the first error from flushing a set.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[NAT_VEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (enabled_nat_bits(sbi, cpc)) {
		f2fs_down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		f2fs_up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	f2fs_down_write(&nm_i->nat_tree_lock);

	/*
	 * if there are no enough space in journal to store dirty nat
	 * entries, remove all entries from journal and merge them
	 * into nat entry set.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	/* gather all dirty sets, keeping them ordered by entry count */
	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	f2fs_up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}
3154e05df3b1SJaegeuk Kim
/*
 * Load the nat_bits area (full/empty NAT-block bitmaps) stored at the
 * tail of the checkpoint segment. If the checkpoint version recorded in
 * the nat_bits header does not match the current one, the bits are
 * stale and nat_bits support is disabled. Returns 0 on success (or
 * when nat_bits is disabled), -ENOMEM or a page read error otherwise.
 */
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	if (!enabled_nat_bits(sbi, NULL))
		return 0;

	/* layout: 8-byte version header + full bitmap + empty bitmap */
	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	/* nat_bits occupy the last blocks of the checkpoint segment */
	nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
			nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		/* stale nat_bits: fall back to full NAT scanning */
		disable_nat_bits(sbi, true);
		return 0;
	}

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}
319922ad0b6aSJaegeuk Kim
/*
 * Prime the free nid bitmaps from nat_bits loaded at mount time: every
 * NAT block flagged "empty" contributes all of its nids as free, and
 * both empty and full blocks are marked scanned in nat_block_bitmap so
 * the free nid build path can skip reading them from disk.
 */
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		/* jump straight to the next fully-free NAT block */
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		/* fully-allocated blocks need no on-disk scan either */
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}
32337041d5d2SChao Yu
/*
 * Initialize the node manager state from the raw superblock and the
 * current checkpoint: geometry (NAT blocks, max nid), counters and
 * thresholds, lookup structures/locks, the NAT version bitmap, and the
 * optional nat_bits. Returns 0 on success or a negative errno.
 */
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_f2fs_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	/* private copy of the checkpoint's NAT version bitmap */
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	/* honor the mount option before trying to load nat_bits */
	if (!test_opt(sbi, NAT_BITS))
		disable_nat_bits(sbi, true);

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	/* mirror copy used to cross-check bitmap consistency */
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}
3294e05df3b1SJaegeuk Kim
init_free_nid_cache(struct f2fs_sb_info * sbi)32959f7e4a2cSJaegeuk Kim static int init_free_nid_cache(struct f2fs_sb_info *sbi)
32964ac91242SChao Yu {
32974ac91242SChao Yu struct f2fs_nm_info *nm_i = NM_I(sbi);
3298bb1105e4SJaegeuk Kim int i;
32994ac91242SChao Yu
3300026f0507SKees Cook nm_i->free_nid_bitmap =
33010b6d4ca0SEric Biggers f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
3302026f0507SKees Cook nm_i->nat_blocks),
3303026f0507SKees Cook GFP_KERNEL);
33044ac91242SChao Yu if (!nm_i->free_nid_bitmap)
33054ac91242SChao Yu return -ENOMEM;
33064ac91242SChao Yu
3307bb1105e4SJaegeuk Kim for (i = 0; i < nm_i->nat_blocks; i++) {
3308bb1105e4SJaegeuk Kim nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3309e15d54d5SYunlei He f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
331068c43a23SYunlei He if (!nm_i->free_nid_bitmap[i])
3311bb1105e4SJaegeuk Kim return -ENOMEM;
3312bb1105e4SJaegeuk Kim }
3313bb1105e4SJaegeuk Kim
3314628b3d14SChao Yu nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
33154ac91242SChao Yu GFP_KERNEL);
33164ac91242SChao Yu if (!nm_i->nat_block_bitmap)
33174ac91242SChao Yu return -ENOMEM;
3318586d1492SChao Yu
33199d2a789cSKees Cook nm_i->free_nid_count =
33209d2a789cSKees Cook f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
33219d2a789cSKees Cook nm_i->nat_blocks),
33229d2a789cSKees Cook GFP_KERNEL);
3323586d1492SChao Yu if (!nm_i->free_nid_count)
3324586d1492SChao Yu return -ENOMEM;
33254ac91242SChao Yu return 0;
33264ac91242SChao Yu }
33274ac91242SChao Yu
f2fs_build_node_manager(struct f2fs_sb_info * sbi)33284d57b86dSChao Yu int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3329e05df3b1SJaegeuk Kim {
3330e05df3b1SJaegeuk Kim int err;
3331e05df3b1SJaegeuk Kim
3332acbf054dSChao Yu sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3333acbf054dSChao Yu GFP_KERNEL);
3334e05df3b1SJaegeuk Kim if (!sbi->nm_info)
3335e05df3b1SJaegeuk Kim return -ENOMEM;
3336e05df3b1SJaegeuk Kim
3337e05df3b1SJaegeuk Kim err = init_node_manager(sbi);
3338e05df3b1SJaegeuk Kim if (err)
3339e05df3b1SJaegeuk Kim return err;
3340e05df3b1SJaegeuk Kim
33414ac91242SChao Yu err = init_free_nid_cache(sbi);
33424ac91242SChao Yu if (err)
33434ac91242SChao Yu return err;
33444ac91242SChao Yu
33457041d5d2SChao Yu /* load free nid status from nat_bits table */
33467041d5d2SChao Yu load_free_nid_bitmap(sbi);
33477041d5d2SChao Yu
3348e2374015SChao Yu return f2fs_build_free_nids(sbi, true, true);
3349e05df3b1SJaegeuk Kim }
3350e05df3b1SJaegeuk Kim
/*
 * Tear down the node manager: drain the free nid list, drop every cached
 * NAT entry and NAT entry set, then free all bitmaps and the nm_info
 * structure itself.  Called at unmount; no concurrent users are expected,
 * but locks are still taken to satisfy the helpers' locking contracts.
 */
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	/* one scratch buffer shared by both gang lookups below */
	void *vec[NAT_VEC_SIZE];
	struct nat_entry **natvec = (struct nat_entry **)vec;
	struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		/* drop the spinlock around the slab free, then retake it */
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	/* by now every nid must have been removed and accounted for */
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	f2fs_down_write(&nm_i->nat_tree_lock);
	/* walk the radix tree in batches of NAT_VEC_SIZE entries */
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NAT_VEC_SIZE, natvec))) {
		unsigned idx;

		/* resume the next lookup just past the last entry seen */
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	/* reuse the scratch buffer; clear stale nat_entry pointers first */
	memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero, when cp_error was occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	f2fs_up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		/* free each per-NAT-block bitmap before the pointer array */
		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	/* clear the back-pointer before freeing to avoid a dangling ref */
	sbi->nm_info = NULL;
	kfree(nm_i);
}
3429e05df3b1SJaegeuk Kim
f2fs_create_node_manager_caches(void)34304d57b86dSChao Yu int __init f2fs_create_node_manager_caches(void)
3431e05df3b1SJaegeuk Kim {
343298510003SChao Yu nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
3433e8512d2eSGu Zheng sizeof(struct nat_entry));
3434e05df3b1SJaegeuk Kim if (!nat_entry_slab)
3435aec71382SChao Yu goto fail;
3436e05df3b1SJaegeuk Kim
343798510003SChao Yu free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
3438e8512d2eSGu Zheng sizeof(struct free_nid));
3439aec71382SChao Yu if (!free_nid_slab)
3440ce3e6d25SMarkus Elfring goto destroy_nat_entry;
3441aec71382SChao Yu
344298510003SChao Yu nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
3443aec71382SChao Yu sizeof(struct nat_entry_set));
3444aec71382SChao Yu if (!nat_entry_set_slab)
3445ce3e6d25SMarkus Elfring goto destroy_free_nid;
344650fa53ecSChao Yu
344798510003SChao Yu fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
344850fa53ecSChao Yu sizeof(struct fsync_node_entry));
344950fa53ecSChao Yu if (!fsync_node_entry_slab)
345050fa53ecSChao Yu goto destroy_nat_entry_set;
3451e05df3b1SJaegeuk Kim return 0;
3452aec71382SChao Yu
345350fa53ecSChao Yu destroy_nat_entry_set:
345450fa53ecSChao Yu kmem_cache_destroy(nat_entry_set_slab);
3455ce3e6d25SMarkus Elfring destroy_free_nid:
3456aec71382SChao Yu kmem_cache_destroy(free_nid_slab);
3457ce3e6d25SMarkus Elfring destroy_nat_entry:
3458aec71382SChao Yu kmem_cache_destroy(nat_entry_slab);
3459aec71382SChao Yu fail:
3460aec71382SChao Yu return -ENOMEM;
3461e05df3b1SJaegeuk Kim }
3462e05df3b1SJaegeuk Kim
/*
 * Destroy the node manager slab caches, in reverse order of their
 * creation in f2fs_create_node_manager_caches().
 */
void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}
3470