1d29fbcdbSNishad Kamdar /* SPDX-License-Identifier: GPL-2.0 */
20a8165d7SJaegeuk Kim /*
339a53e0cSJaegeuk Kim * fs/f2fs/node.h
439a53e0cSJaegeuk Kim *
539a53e0cSJaegeuk Kim * Copyright (c) 2012 Samsung Electronics Co., Ltd.
639a53e0cSJaegeuk Kim * http://www.samsung.com/
739a53e0cSJaegeuk Kim */
/* start node id of a node block dedicated to the given node id */
#define START_NID(nid) (((nid) / NAT_ENTRY_PER_BLOCK) * NAT_ENTRY_PER_BLOCK)

/* node block offset on the NAT area dedicated to the given start node id */
#define NAT_BLOCK_OFFSET(start_nid) ((start_nid) / NAT_ENTRY_PER_BLOCK)

/* # of pages to perform synchronous readahead before building free nids */
#define FREE_NID_PAGES	8
#define MAX_FREE_NIDS	(NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)

/* size of free nid batch when shrinking */
#define SHRINK_NID_BATCH_SIZE	8

#define DEF_RA_NID_PAGES	0	/* # of nid pages to be readaheaded */

/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE		128

/* control the memory footprint threshold (10MB per 1GB ram) */
#define DEF_RAM_THRESHOLD	1

/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD	10
/* control total # of nats */
#define DEF_NAT_CACHE_THRESHOLD	100000

/* control total # of node writes used for roll-forward recovery */
#define DEF_RF_NODE_BLOCKS	0

/* vector size for gang look-up from nat cache that consists of radix tree */
#define NAT_VEC_SIZE	32

/* return value for read_node_page */
#define LOCKED_PAGE	1

/* check pinned file's alignment status of physical blocks */
#define FILE_NOT_ALIGNED	1
45859fca6bSChao Yu
/* For flag in struct node_info; tested/updated via {set,get}_nat_flag() */
enum {
	IS_CHECKPOINTED,	/* is it checkpointed before? */
	HAS_FSYNCED_INODE,	/* is the inode fsynced before? */
	HAS_LAST_FSYNC,		/* has the latest node fsync mark? */
	IS_DIRTY,		/* this nat entry is dirty? */
	IS_PREALLOC,		/* nat entry is preallocated */
};
545c27f4eeSChao Yu
/* For node type in __get_node_folio() */
enum node_type {
	NODE_TYPE_REGULAR,	/* any node block */
	NODE_TYPE_INODE,	/* inode block */
	NODE_TYPE_XATTR,	/* xattr node block */
};
611cf6b567SChao Yu
/*
 * For node information (the in-memory image of one NAT entry)
 */
struct node_info {
	nid_t nid;		/* node id */
	nid_t ino;		/* inode number of the node's owner */
	block_t blk_addr;	/* block address of the node */
	unsigned char version;	/* version of the node */
	unsigned char flag;	/* for node information bits (enum above) */
};
727ef35e3bSJaegeuk Kim
/* cached NAT entry; looked up through the nat radix tree */
struct nat_entry {
	struct list_head list;	/* for clean or dirty nat list */
	struct node_info ni;	/* in-memory node information */
};
7739a53e0cSJaegeuk Kim
/* accessors for the node_info embedded in a nat_entry */
#define nat_get_nid(nat)		((nat)->ni.nid)
#define nat_set_nid(nat, n)		((nat)->ni.nid = (n))
#define nat_get_blkaddr(nat)		((nat)->ni.blk_addr)
#define nat_set_blkaddr(nat, b)		((nat)->ni.blk_addr = (b))
#define nat_get_ino(nat)		((nat)->ni.ino)
#define nat_set_ino(nat, i)		((nat)->ni.ino = (i))
#define nat_get_version(nat)		((nat)->ni.version)
#define nat_set_version(nat, v)		((nat)->ni.version = (v))

#define inc_node_version(version)	(++(version))
8839a53e0cSJaegeuk Kim
/*
 * Copy the identifying fields of a node_info.  The flag member is
 * deliberately not copied: its bits describe the state of the
 * destination entry, not of the source.
 */
static inline void copy_node_info(struct node_info *dst,
						struct node_info *src)
{
	dst->nid = src->nid;
	dst->ino = src->ino;
	dst->blk_addr = src->blk_addr;
	dst->version = src->version;
	/* should not copy flag here */
}
985c27f4eeSChao Yu
/* Set (@set == true) or clear one node_info flag bit on @ne. */
static inline void set_nat_flag(struct nat_entry *ne,
				unsigned int type, bool set)
{
	unsigned int mask = BIT(type);

	if (set)
		ne->ni.flag |= mask;
	else
		ne->ni.flag &= ~mask;
}
1077ef35e3bSJaegeuk Kim
get_nat_flag(struct nat_entry * ne,unsigned int type)1087ef35e3bSJaegeuk Kim static inline bool get_nat_flag(struct nat_entry *ne, unsigned int type)
1097ef35e3bSJaegeuk Kim {
110447286ebSYangtao Li return ne->ni.flag & BIT(type);
1117ef35e3bSJaegeuk Kim }
1127ef35e3bSJaegeuk Kim
/* reset per-checkpoint flag bits to their post-checkpoint defaults */
static inline void nat_reset_flag(struct nat_entry *ne)
{
	/* these states can be set only after checkpoint was done */
	set_nat_flag(ne, IS_CHECKPOINTED, true);
	set_nat_flag(ne, HAS_FSYNCED_INODE, false);
	set_nat_flag(ne, HAS_LAST_FSYNC, true);
}
12088bd02c9SJaegeuk Kim
/*
 * Fill an in-memory node_info from an on-disk NAT entry.
 * nid and flag are not stored in the raw entry and are left untouched.
 */
static inline void node_info_from_raw_nat(struct node_info *ni,
						struct f2fs_nat_entry *raw_ne)
{
	ni->ino = le32_to_cpu(raw_ne->ino);
	ni->blk_addr = le32_to_cpu(raw_ne->block_addr);
	ni->version = raw_ne->version;
}
12839a53e0cSJaegeuk Kim
/* inverse of node_info_from_raw_nat(): serialize to the on-disk layout */
static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
						struct node_info *ni)
{
	raw_ne->ino = cpu_to_le32(ni->ino);
	raw_ne->block_addr = cpu_to_le32(ni->blk_addr);
	raw_ne->version = ni->version;
}
13694dac22eSChao Yu
/* true once dirty nat entries exceed dirty_nats_ratio percent of max_nid */
static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt[DIRTY_NAT] >= NM_I(sbi)->max_nid *
					NM_I(sbi)->dirty_nats_ratio / 100;
}
1427d768d2cSChao Yu
excess_cached_nats(struct f2fs_sb_info * sbi)143e589c2c4SJaegeuk Kim static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
144e589c2c4SJaegeuk Kim {
145a95ba66aSJaegeuk Kim return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
146e589c2c4SJaegeuk Kim }
147e589c2c4SJaegeuk Kim
/* kinds of cached memory checked against the ram threshold — see
 * DEF_RAM_THRESHOLD above */
enum mem_type {
	FREE_NIDS,		/* indicates the free nid list */
	NAT_ENTRIES,		/* indicates the cached nat entry */
	DIRTY_DENTS,		/* indicates dirty dentry pages */
	INO_ENTRIES,		/* indicates inode entries */
	READ_EXTENT_CACHE,	/* indicates read extent cache */
	AGE_EXTENT_CACHE,	/* indicates age extent cache */
	DISCARD_CACHE,		/* indicates memory of cached discard cmds */
	COMPRESS_PAGE,		/* indicates memory of cached compressed pages */
	BASE_CHECK,		/* check kernel status */
};
159cdfc41c1SJaegeuk Kim
/* a set of dirty nat entries sharing the same NAT block */
struct nat_entry_set {
	struct list_head set_list;	/* link with other nat sets */
	struct list_head entry_list;	/* link with dirty nat entries */
	nid_t set;			/* set number */
	unsigned int entry_cnt;		/* the # of nat entries in set */
};
166aec71382SChao Yu
/* one entry of the cached free node id list */
struct free_nid {
	struct list_head list;	/* for free node id list */
	nid_t nid;		/* node id */
	int state;		/* in use or not: FREE_NID or PREALLOC_NID */
};
17239a53e0cSJaegeuk Kim
/*
 * Peek at the first cached free nid without claiming it.
 *
 * Note: *nid is left unmodified when the free nid list is empty, so
 * callers must preinitialize it.
 */
static inline void next_free_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *fnid;

	spin_lock(&nm_i->nid_list_lock);
	if (nm_i->nid_cnt[FREE_NID] <= 0) {
		spin_unlock(&nm_i->nid_list_lock);
		return;
	}
	fnid = list_first_entry(&nm_i->free_nid_list, struct free_nid, list);
	*nid = fnid->nid;
	spin_unlock(&nm_i->nid_list_lock);
}
18739a53e0cSJaegeuk Kim
18839a53e0cSJaegeuk Kim /*
18939a53e0cSJaegeuk Kim * inline functions
19039a53e0cSJaegeuk Kim */
/* copy the current NAT version bitmap into @addr (bitmap_size bytes) */
static inline void get_nat_bitmap(struct f2fs_sb_info *sbi, void *addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

#ifdef CONFIG_F2FS_CHECK_FS
	/* the mirror copy must always match the live bitmap */
	if (memcmp(nm_i->nat_bitmap, nm_i->nat_bitmap_mir,
						nm_i->bitmap_size))
		f2fs_bug_on(sbi, 1);
#endif
	memcpy(addr, nm_i->nat_bitmap, nm_i->bitmap_size);
}
20239a53e0cSJaegeuk Kim
/*
 * Return the block address of the valid NAT block covering nid @start.
 * NAT segments are kept in two alternating copies; nat_bitmap says
 * which copy of each block is current.
 */
static inline pgoff_t current_nat_addr(struct f2fs_sb_info *sbi, nid_t start)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	pgoff_t block_off;
	pgoff_t block_addr;

	/*
	 * block_off = segment_off * 512 + off_in_segment
	 * OLD = (segment_off * 512) * 2 + off_in_segment
	 * NEW = 2 * (segment_off * 512 + off_in_segment) - off_in_segment
	 */
	block_off = NAT_BLOCK_OFFSET(start);

	block_addr = (pgoff_t)(nm_i->nat_blkaddr +
		(block_off << 1) -
		(block_off & (BLKS_PER_SEG(sbi) - 1)));

	/* bit set: the second copy holds the up-to-date block */
	if (f2fs_test_bit(block_off, nm_i->nat_bitmap))
		block_addr += BLKS_PER_SEG(sbi);

	return block_addr;
}
22539a53e0cSJaegeuk Kim
/*
 * Return the address of the same NAT block in the other copy: XOR-ing
 * the segment-size bit flips between the two alternating NAT segments.
 */
static inline pgoff_t next_nat_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	block_addr -= nm_i->nat_blkaddr;
	block_addr ^= BIT(sbi->log_blocks_per_seg);
	return block_addr + nm_i->nat_blkaddr;
}
23539a53e0cSJaegeuk Kim
/* flip the version bit so lookups switch to the other NAT block copy */
static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
{
	unsigned int block_off = NAT_BLOCK_OFFSET(start_nid);

	f2fs_change_bit(block_off, nm_i->nat_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	/* keep the debug mirror in sync with the live bitmap */
	f2fs_change_bit(block_off, nm_i->nat_bitmap_mir);
#endif
}
24539a53e0cSJaegeuk Kim
ino_of_node(struct page * node_page)246a468f0efSJaegeuk Kim static inline nid_t ino_of_node(struct page *node_page)
247a468f0efSJaegeuk Kim {
248a468f0efSJaegeuk Kim struct f2fs_node *rn = F2FS_NODE(node_page);
249a468f0efSJaegeuk Kim return le32_to_cpu(rn->footer.ino);
250a468f0efSJaegeuk Kim }
251a468f0efSJaegeuk Kim
nid_of_node(struct page * node_page)252a468f0efSJaegeuk Kim static inline nid_t nid_of_node(struct page *node_page)
253a468f0efSJaegeuk Kim {
254a468f0efSJaegeuk Kim struct f2fs_node *rn = F2FS_NODE(node_page);
255a468f0efSJaegeuk Kim return le32_to_cpu(rn->footer.nid);
256a468f0efSJaegeuk Kim }
257a468f0efSJaegeuk Kim
ofs_of_node(const struct page * node_page)258521a4684SMatthew Wilcox (Oracle) static inline unsigned int ofs_of_node(const struct page *node_page)
259a468f0efSJaegeuk Kim {
260a468f0efSJaegeuk Kim struct f2fs_node *rn = F2FS_NODE(node_page);
261a468f0efSJaegeuk Kim unsigned flag = le32_to_cpu(rn->footer.flag);
262a468f0efSJaegeuk Kim return flag >> OFFSET_BIT_SHIFT;
263a468f0efSJaegeuk Kim }
264a468f0efSJaegeuk Kim
cpver_of_node(struct page * node_page)265a468f0efSJaegeuk Kim static inline __u64 cpver_of_node(struct page *node_page)
266a468f0efSJaegeuk Kim {
267a468f0efSJaegeuk Kim struct f2fs_node *rn = F2FS_NODE(node_page);
268a468f0efSJaegeuk Kim return le64_to_cpu(rn->footer.cp_ver);
269a468f0efSJaegeuk Kim }
270a468f0efSJaegeuk Kim
next_blkaddr_of_node(struct page * node_page)271a468f0efSJaegeuk Kim static inline block_t next_blkaddr_of_node(struct page *node_page)
272a468f0efSJaegeuk Kim {
273a468f0efSJaegeuk Kim struct f2fs_node *rn = F2FS_NODE(node_page);
274a468f0efSJaegeuk Kim return le32_to_cpu(rn->footer.next_blkaddr);
275a468f0efSJaegeuk Kim }
276a468f0efSJaegeuk Kim
/*
 * Initialize the footer of node page @page with @nid/@ino/@ofs.
 * When @reset is true the whole node block is zeroed first; otherwise
 * the low footer flag bits (e.g. COLD_BIT_SHIFT) are preserved.
 */
static inline void fill_node_footer(struct page *page, nid_t nid,
				nid_t ino, unsigned int ofs, bool reset)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int old_flag = 0;

	if (reset)
		memset(rn, 0, sizeof(*rn));
	else
		old_flag = le32_to_cpu(rn->footer.flag);

	rn->footer.nid = cpu_to_le32(nid);
	rn->footer.ino = cpu_to_le32(ino);

	/* should remain old flag bits such as COLD_BIT_SHIFT */
	rn->footer.flag = cpu_to_le32((ofs << OFFSET_BIT_SHIFT) |
					(old_flag & OFFSET_BIT_MASK));
}
29539a53e0cSJaegeuk Kim
copy_node_footer(struct page * dst,struct page * src)29639a53e0cSJaegeuk Kim static inline void copy_node_footer(struct page *dst, struct page *src)
29739a53e0cSJaegeuk Kim {
29845590710SGu Zheng struct f2fs_node *src_rn = F2FS_NODE(src);
29945590710SGu Zheng struct f2fs_node *dst_rn = F2FS_NODE(dst);
30039a53e0cSJaegeuk Kim memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
30139a53e0cSJaegeuk Kim }
30239a53e0cSJaegeuk Kim
/*
 * Stamp the current checkpoint version (with the checkpoint crc folded
 * into the high 32 bits when CP_CRC_RECOVERY_FLAG is set) and the next
 * node block address into the footer; is_recoverable_dnode() later
 * compares this stamp during roll-forward recovery.
 */
static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	struct f2fs_node *rn = F2FS_NODE(page);
	__u64 cp_ver = cur_cp_version(ckpt);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	rn->footer.cp_ver = cpu_to_le64(cp_ver);
	rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
31539a53e0cSJaegeuk Kim
/*
 * A dnode is recoverable when the cp_ver in its footer matches the
 * current checkpoint version.  With CP_NOCRC_RECOVERY_FLAG only the low
 * 32 bits are compared; with CP_CRC_RECOVERY_FLAG the checkpoint crc is
 * folded into the high 32 bits before comparing.
 */
static inline bool is_recoverable_dnode(struct page *page)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(F2FS_P_SB(page));
	__u64 cp_ver = cur_cp_version(ckpt);

	/* Don't care crc part, if fsck.f2fs sets it. */
	if (__is_set_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG))
		return (cp_ver << 32) == (cpver_of_node(page) << 32);

	if (__is_set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG))
		cp_ver |= (cur_cp_crc(ckpt) << 32);

	return cp_ver == cpver_of_node(page);
}
33039a53e0cSJaegeuk Kim
33139a53e0cSJaegeuk Kim /*
33239a53e0cSJaegeuk Kim * f2fs assigns the following node offsets described as (num).
33339a53e0cSJaegeuk Kim * N = NIDS_PER_BLOCK
33439a53e0cSJaegeuk Kim *
33539a53e0cSJaegeuk Kim * Inode block (0)
33639a53e0cSJaegeuk Kim * |- direct node (1)
33739a53e0cSJaegeuk Kim * |- direct node (2)
33839a53e0cSJaegeuk Kim * |- indirect node (3)
33939a53e0cSJaegeuk Kim * | `- direct node (4 => 4 + N - 1)
34039a53e0cSJaegeuk Kim * |- indirect node (4 + N)
34139a53e0cSJaegeuk Kim * | `- direct node (5 + N => 5 + 2N - 1)
34239a53e0cSJaegeuk Kim * `- double indirect node (5 + 2N)
34339a53e0cSJaegeuk Kim * `- indirect node (6 + 2N)
3444f4124d0SChao Yu * `- direct node
3454f4124d0SChao Yu * ......
3464f4124d0SChao Yu * `- indirect node ((6 + 2N) + x(N + 1))
3474f4124d0SChao Yu * `- direct node
3484f4124d0SChao Yu * ......
3494f4124d0SChao Yu * `- indirect node ((6 + 2N) + (N - 1)(N + 1))
3504f4124d0SChao Yu * `- direct node
35139a53e0cSJaegeuk Kim */
/*
 * True if @node_page directly addresses data blocks (inode, direct node
 * or xattr block); false for indirect/double indirect index nodes.  The
 * magic offsets follow the node layout diagram above.
 */
static inline bool IS_DNODE(const struct page *node_page)
{
	unsigned int ofs = ofs_of_node(node_page);

	if (f2fs_has_xattr_block(ofs))
		return true;

	/* offsets 3, 4 + N and 5 + 2N are (double) indirect nodes */
	if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
			ofs == 5 + 2 * NIDS_PER_BLOCK)
		return false;
	if (ofs >= 6 + 2 * NIDS_PER_BLOCK) {
		ofs -= 6 + 2 * NIDS_PER_BLOCK;
		/* every (N + 1)-th offset under the double indirect node
		 * is itself an indirect node */
		if (!((long int)ofs % (NIDS_PER_BLOCK + 1)))
			return false;
	}
	return true;
}
36939a53e0cSJaegeuk Kim
/*
 * Store @nid in slot @off of node page @p (@i: the page is an inode
 * block, so index into i_nid[]).  Waits for any writeback to finish
 * first; returns the result of set_page_dirty().
 */
static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
{
	struct f2fs_node *rn = F2FS_NODE(p);

	f2fs_wait_on_page_writeback(p, NODE, true, true);

	if (i)
		rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
	else
		rn->in.nid[off] = cpu_to_le32(nid);
	return set_page_dirty(p);
}
38239a53e0cSJaegeuk Kim
get_nid(struct page * p,int off,bool i)38339a53e0cSJaegeuk Kim static inline nid_t get_nid(struct page *p, int off, bool i)
38439a53e0cSJaegeuk Kim {
38545590710SGu Zheng struct f2fs_node *rn = F2FS_NODE(p);
38645590710SGu Zheng
38739a53e0cSJaegeuk Kim if (i)
38839a53e0cSJaegeuk Kim return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
38939a53e0cSJaegeuk Kim return le32_to_cpu(rn->in.nid[off]);
39039a53e0cSJaegeuk Kim }
39139a53e0cSJaegeuk Kim
39239a53e0cSJaegeuk Kim /*
39339a53e0cSJaegeuk Kim * Coldness identification:
39439a53e0cSJaegeuk Kim * - Mark cold files in f2fs_inode_info
39539a53e0cSJaegeuk Kim * - Mark cold node blocks in their node footer
39639a53e0cSJaegeuk Kim * - Mark cold data pages in page cache
39739a53e0cSJaegeuk Kim */
39839a53e0cSJaegeuk Kim
is_node(const struct page * page,int type)399521a4684SMatthew Wilcox (Oracle) static inline int is_node(const struct page *page, int type)
40039a53e0cSJaegeuk Kim {
40145590710SGu Zheng struct f2fs_node *rn = F2FS_NODE(page);
402447286ebSYangtao Li return le32_to_cpu(rn->footer.flag) & BIT(type);
40339a53e0cSJaegeuk Kim }
40439a53e0cSJaegeuk Kim
/* footer flag testers built on is_node() */
#define is_cold_node(page)	is_node(page, COLD_BIT_SHIFT)
#define is_fsync_dnode(page)	is_node(page, FSYNC_BIT_SHIFT)
#define is_dent_dnode(page)	is_node(page, DENT_BIT_SHIFT)
40839a53e0cSJaegeuk Kim
/*
 * Update the COLD bit in the node footer: directory nodes are marked
 * hot (bit cleared), all other nodes cold (bit set).
 */
static inline void set_cold_node(struct page *page, bool is_dir)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	unsigned int cold = BIT(COLD_BIT_SHIFT);

	flag = is_dir ? (flag & ~cold) : (flag | cold);
	rn->footer.flag = cpu_to_le32(flag);
}
42039a53e0cSJaegeuk Kim
/*
 * Set (@mark != 0) or clear footer flag bit @type.  With
 * CONFIG_F2FS_CHECK_FS the inode checksum is refreshed afterwards,
 * since the footer contents changed.
 */
static inline void set_mark(struct page *page, int mark, int type)
{
	struct f2fs_node *rn = F2FS_NODE(page);
	unsigned int flag = le32_to_cpu(rn->footer.flag);
	if (mark)
		flag |= BIT(type);
	else
		flag &= ~BIT(type);
	rn->footer.flag = cpu_to_le32(flag);

#ifdef CONFIG_F2FS_CHECK_FS
	f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
}
/* convenience wrappers around set_mark() */
#define set_dentry_mark(page, mark)	set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark)	set_mark(page, mark, FSYNC_BIT_SHIFT)
437