// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>

#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;

/*
 * Check whether the given nid is within the node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		f2fs_handle_error(sbi, ERROR_CORRUPTED_INODE);
		return -EFSCORRUPTED;
	}
	return 0;
}

bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 25%, 25% of memory to each component
	 * respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == READ_EXTENT_CACHE || type == AGE_EXTENT_CACHE) {
		enum extent_type etype = type == READ_EXTENT_CACHE ?
						EX_READ : EX_BLOCK_AGE;
		struct extent_tree_info *eti = &sbi->extent_tree[etype];

		mem_size = (atomic_read(&eti->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&eti->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching compressed
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
				free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}

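/*
 * A worked example of the shares above: FREE_NIDS and NAT_ENTRIES may each
 * grow to a quarter of the ram_thresh share of low memory, i.e.
 * (avail_ram * ram_thresh / 100) >> 2 pages, while DIRTY_DENTS and
 * INO_ENTRIES get half of that share and DISCARD_CACHE the whole share;
 * ram_thresh is a percentage of available low memory.
 */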
static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page_folio(page));
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get the current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}

static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
					   nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
				    GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}

/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* for a recently accessed nat entry, move it to the tail of the LRU list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
			nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}

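/*
 * Dirty nat entries are grouped into one nat_entry_set per NAT block
 * (keyed by NAT_BLOCK_OFFSET(nid)), so that a checkpoint can flush them
 * to disk block by block; __grab_nat_entry_set() finds or creates the set
 * covering @ne.
 */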
static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
						  struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
					     GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
				  struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	if (!new_ne)
		head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the conditions below:
	 * 1. updating NEW_ADDR to a valid block address;
	 * 2. updating an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}

void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
					      struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
				   GFP_NOFS, true, NULL);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}

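/*
 * Drop the tracking entry added by f2fs_add_fsync_node_entry() and release
 * the page reference taken there; reaching the end of the list without a
 * match indicates a bug.
 */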
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}

void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}

int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	f2fs_up_read(&nm_i->nat_tree_lock);
	return need_update;
}

/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
			    struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	/* Let's mitigate lock contention of nat_tree_lock during checkpoint */
	if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
		return;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	f2fs_up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}

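/*
 * Record that node @ni->nid now lives at @new_blkaddr in the cached NAT and
 * mark the entry dirty so the next checkpoint writes it back.  The version
 * number is bumped when the node is removed (new_blkaddr == NULL_ADDR), and
 * the fsync state is refreshed on the owning inode's nat entry.
 */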
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			  block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

	f2fs_down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache, so reinitialize it with the new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment the version no as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	f2fs_up_write(&nm_i->nat_tree_lock);
}

int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	f2fs_up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}

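/*
 * Resolve the node info for @nid, checking in order: the in-memory nat
 * cache, the NAT journal kept in the CURSEG_HOT_DATA summary, and finally
 * the on-disk NAT block; the result is cached for later lookups.
 */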
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
		       struct node_info *ni, bool checkpoint_context)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check the nat cache */
	f2fs_down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		f2fs_up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab journal_rwsem
	 * first.  This sem is on the critical path of the checkpoint, which
	 * requires the above nat_tree_lock.  Therefore, if we failed to grab
	 * it here, we should retry rather than stalling the checkpoint.
	 */
	if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
		down_read(&curseg->journal_rwsem);
	} else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) ||
				!down_read_trylock(&curseg->journal_rwsem)) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		f2fs_up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from the nat page */
	index = current_nat_addr(sbi, nid);
	f2fs_up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
	    !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache the nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}

/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, (int)NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}

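/*
 * Block-to-path illustration for get_node_path() below: for block index b,
 *   b < direct_index                        -> addressed in the inode itself
 *   b < direct_index + 2 * direct_blks      -> NODE_DIR1 / NODE_DIR2
 *   b < ...          + 2 * indirect_blks    -> NODE_IND1 / NODE_IND2
 *   anything else up to the dindirect range -> NODE_DIND
 * where direct_blks = ADDRS_PER_BLOCK() and each indirect node fans out to
 * NIDS_PER_BLOCK children.
 */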
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
			 int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}

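/*
 * On success, f2fs_get_dnode_of_data() below fills @dn with the locked
 * direct node page (dn->node_page), the nid and in-node offset of the
 * requested block, and its current block address (dn->data_blkaddr).
 */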
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
	    f2fs_sb_has_readonly(sbi)) {
		unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
		unsigned int ofs_in_node = dn->ofs_in_node;
		pgoff_t fofs = index;
		unsigned int c_len;
		block_t blkaddr;

		/* should align fofs and ofs_in_node to cluster_size */
		if (fofs % cluster_size) {
			fofs = round_down(fofs, cluster_size);
			ofs_in_node = round_down(ofs_in_node, cluster_size);
		}

		c_len = f2fs_cluster_blocks_are_contiguous(dn, ofs_in_node);
		if (!c_len)
			goto out;

		blkaddr = data_blkaddr(dn->inode, dn->node_page, ofs_in_node);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						ofs_in_node + 1);

		f2fs_update_read_extent_tree_range_compressed(dn->inode,
					fofs, blkaddr, cluster_size, c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}

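/*
 * Invalidate the block backing dn->node_page, update its NAT entry to
 * NULL_ADDR, and drop the page from the node mapping.  When the node is
 * the inode itself, orphan and valid-inode accounting are rolled back too.
 */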
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
	if (err)
		return err;

	if (ni.blk_addr != NEW_ADDR &&
	    !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) {
		f2fs_err_ratelimited(sbi,
			"nat entry is corrupted, run fsck to fix it, ino:%u, "
			"nid:%u, blkaddr:%u", ni.ino, ni.nid, ni.blk_addr);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INCONSISTENT_NAT);
		return -EFSCORRUPTED;
	}

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = page_folio(dn->node_page)->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(sbi, dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
		f2fs_err(sbi, "incorrect node reference, ino: %lu, nid: %u, ino_of_node: %u",
			 dn->inode->i_ino, dn->nid, ino_of_node(page));
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_NODE_REFERENCE);
		f2fs_put_page(page, 1);
		return -EFSCORRUPTED;
	}

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
	err = truncate_node(dn);
	if (err) {
		f2fs_put_page(page, 1);
		return err;
	}

	return 1;
}

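/*
 * Recursively free the subtree rooted at dn->nid.  The return value is the
 * number of node blocks freed; a fully freed (or absent, dn->nid == 0)
 * subtree reports NIDS_PER_BLOCK + 1 so the parent can clear its nid slot.
 */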
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
			  int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}

static int truncate_partial_nodes(struct dnode_of_data *dn,
				  struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = get_nid(dn->inode_page, offset[0], true);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}

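/*
 * noffset numbering used below (cf. get_node_path()): inode = 0,
 * NODE_DIR1 = 1, NODE_DIR2 = 2, NODE_IND1 = 3 with its children following,
 * NODE_IND2 = 4 + NIDS_PER_BLOCK, and NODE_DIND = 5 + 2 * NIDS_PER_BLOCK.
 */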
/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = get_nid(page, offset[0], true);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err == -ENOENT) {
			set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
			f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
			f2fs_err_ratelimited(sbi,
				"truncate node fail, ino:%lu, nid:%u, "
				"offset[0]:%d, offset[1]:%d, nofs:%d",
				inode->i_ino, dn.nid, offset[0],
				offset[1], nofs);
			err = 0;
		}
		if (err < 0)
			goto fail;
		if (offset[1] == 0 && get_nid(page, offset[0], true)) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			set_nid(page, offset[0], 0, true);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}

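/*
 * By the time f2fs_remove_inode_page() runs, every data and node block of
 * the inode must already be truncated: i_blocks may only be 0 or 8 (eight
 * 512-byte units, i.e. the one remaining inode block).
 */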
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}
	return 0;
}

struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}

struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	if (unlikely(new_ni.blk_addr != NULL_ADDR)) {
		err = -EFSCORRUPTED;
		dec_valid_node_count(sbi, dn->inode, !ofs);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_handle_error(sbi, ERROR_INVALID_BLKADDR);
		goto fail;
	}
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;
fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

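/*
 * read_node_page() returns LOCKED_PAGE when the page was already uptodate
 * in the cache (it stays locked); 0 means a read bio was submitted and the
 * page is unlocked at I/O completion.
 */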
/*
 * The caller should release the page as follows, depending on the return
 * value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, blk_opf_t op_flags)
{
	struct folio *folio = page_folio(page);
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (folio_test_uptodate(folio)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			folio_clear_uptodate(folio);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, folio->index, &ni, false);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR)) {
		folio_clear_uptodate(folio);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, NULL, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}

/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}

static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
				    struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		goto out_put_err;
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (likely(nid == nid_of_node(page)))
		return page;

	f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
		  nid, nid_of_node(page), ino_of_node(page),
		  ofs_of_node(page), cpver_of_node(page),
		  next_blkaddr_of_node(page));
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
	err = -EFSCORRUPTED;
out_err:
	ClearPageUptodate(page);
out_put_err:
	/* ENOENT comes from read_node_page, which is not an error. */
	if (err != -ENOENT)
		f2fs_handle_page_eio(sbi, page_folio(page), NODE);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

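/*
 * f2fs_get_node_page() returns the node page locked and uptodate; release
 * it with f2fs_put_page(page, 1).  The _ra variant additionally reads
 * ahead sibling nids from @parent.
 */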
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}

static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page_folio(page));
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index;
	struct folio_batch fbatch;
	struct page *last_page = NULL;
	int nr_folios;

	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
	return last_page;
}

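/*
 * Write one dirty node page.  @atomic callers (fsync) get REQ_PREFLUSH |
 * REQ_FUA on the bio unless NOBARRIER is set; when the page cannot be
 * written now, it is redirtied and AOP_WRITEPAGE_ACTIVATE is returned.
 */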
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
			     struct writeback_control *wbc, bool do_balance,
			     enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct folio *folio = page_folio(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = 0,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(folio, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		/* keep node pages in remount-ro mode */
		if (F2FS_OPTION(sbi).errors == MOUNT_ERRORS_READONLY)
			goto redirty_out;
		folio_clear_uptodate(folio);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		folio_unlock(folio);
		return 0;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
	    wbc->sync_mode == WB_SYNC_NONE &&
	    IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, folio->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!f2fs_down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		f2fs_down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		folio_clear_uptodate(folio);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		f2fs_up_read(&sbi->node_write);
		folio_unlock(folio);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
	    !f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		f2fs_up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	folio_start_writeback(folio);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	f2fs_up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	folio_unlock(folio);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	folio_redirty_for_writepage(wbc, folio);
	return AOP_WRITEPAGE_ACTIVATE;
}

int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!folio_test_writeback(page_folio(node_page)))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}

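/*
 * Write back all dirty dnodes of @inode for fsync.  With @atomic, only the
 * last dnode carries the fsync mark so that recovery can recognize a
 * complete chain; if that page was written by someone else meanwhile, it
 * is redirtied and the scan retried.
 */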
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			  struct writeback_control *wbc, bool atomic,
			  unsigned int *seq_id)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_folios;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	folio_batch_init(&fbatch);
	index = 0;

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				folio_batch_release(&fbatch);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				percpu_counter_inc(&sbi->rf_node_block_count);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, page_folio(last_page)->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}

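/*
 * Match callback for find_inode_nowait(): only dirty inodes that still sit
 * on the DIRTY_META gdirty_list are grabbed (igrab) for flushing.
 */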
static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}

static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}

void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct folio_batch fbatch;
	int nr_folios;

	folio_batch_init(&fbatch);

	while ((nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi), &index,
					(pgoff_t)-1, PAGECACHE_TAG_DIRTY,
					&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;

			if (!IS_INODE(page))
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

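/*
 * Write back dirty node pages in three passes ordered for consistency:
 * indirect nodes first, then dentry dnodes, then regular file dnodes (see
 * the step comment inside the loop).
 */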
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
			 struct writeback_control *wbc,
			 bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct folio_batch fbatch;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_folios, done = 0;

	folio_batch_init(&fbatch);

next_step:
	index = 0;

	while (!done && (nr_folios = filemap_get_folios_tag(NODE_MAPPING(sbi),
				&index, (pgoff_t)-1, PAGECACHE_TAG_DIRTY,
				&fbatch))) {
		int i;

		for (i = 0; i < nr_folios; i++) {
			struct page *page = &fbatch.folios[i]->page;
			bool submitted = false;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && flush_dirty_inode(page))
				goto lock_node;
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		folio_batch_release(&fbatch);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
		    wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;
	return ret;
}

int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
				      unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);

		put_page(page);
	}

	return filemap_check_errors(NODE_MAPPING(sbi));
}

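/*
 * wb_sync_req[NODE] is raised while a WB_SYNC_ALL writeback is in flight;
 * background WB_SYNC_NONE writeback then backs off (flushing any current
 * plug) to avoid a potential deadlock, as handled below.
 */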
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE])) {
		/* to avoid potential deadlock */
		if (current->plug)
			blk_finish_plug(current->plug);
		goto skip_write;
	}

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}

static bool f2fs_dirty_node_folio(struct address_space *mapping,
		struct folio *folio)
{
	trace_f2fs_set_page_dirty(folio, NODE);

	if (!folio_test_uptodate(folio))
		folio_mark_uptodate(folio);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(&folio->page))
		f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
#endif
	if (filemap_dirty_folio(mapping, folio)) {
		inc_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
		set_page_private_reference(&folio->page);
		return true;
	}
	return false;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.dirty_folio	= f2fs_dirty_node_folio,
	.invalidate_folio = f2fs_invalidate_folio,
	.release_folio	= f2fs_release_folio,
	.migrate_folio	= filemap_migrate_folio,
};

static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
							nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);

	if (err)
		return err;

	nm_i->nid_cnt[FREE_NID]++;
	list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}

static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}

static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}

bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i;
	bool ret = true;

	f2fs_down_read(&nm_i->nat_tree_lock);
	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
			ret = false;
			break;
		}
	}
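	/*
	 * ret stays true only if every NAT block has already been scanned
	 * into the free nid bitmap (see scan_nat_page()).
	 */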
	f2fs_up_read(&nm_i->nat_tree_lock);

	return ret;
}

static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}

/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* nid 0 must never be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 * Thread A             Thread B
		 * - f2fs_create
		 *  - f2fs_new_inode
		 *   - f2fs_alloc_nid
		 *    - __insert_nid_to_list(PREALLOC_NID)
		 *                      - f2fs_balance_fs_bg
		 *                       - f2fs_build_free_nids
		 *                        - __f2fs_build_free_nids
		 *                         - scan_nat_page
		 *                          - add_free_nid
		 *                           - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *                      - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

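	/*
	 * Mark this NAT block as scanned up front; update_free_nid_bitmap()
	 * only accepts updates for blocks whose bit is set here.
	 */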
	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EFSCORRUPTED;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}

static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}

static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	f2fs_down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	f2fs_up_read(&nm_i->nat_tree_lock);
}

static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	f2fs_down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				f2fs_up_read(&nm_i->nat_tree_lock);

				if (ret == -EFSCORRUPTED) {
					f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
					set_sbi_flag(sbi, SBI_NEED_FSCK);
					f2fs_handle_error(sbi,
							ERROR_INCONSISTENT_NAT);
				}

				return ret;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* resume the next scan from here to keep finding free nids */
	nm_i->next_scan_nid = nid;

	/* find free nids from the current summary pages */
	scan_curseg_cache(sbi);

	f2fs_up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}

int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter.
 * The returned nid can be used as the ino as well as the nid when an
 * inode is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID))
		return false;

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan NAT pages and their caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * f2fs_alloc_nid() should be called prior to this function.
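 *
 * Every nid obtained from f2fs_alloc_nid() must be released through
 * exactly one of f2fs_alloc_nid_done() (on success) or
 * f2fs_alloc_nid_failed() (on failure). A minimal sketch of the
 * pattern, mirroring f2fs_recover_xattr_data() below (variable
 * names illustrative):
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = f2fs_new_node_page(&dn, ofs);
 *	if (IS_ERR(page)) {
 *		f2fs_alloc_nid_failed(sbi, nid);
 *		return PTR_ERR(page);
 *	}
 *	f2fs_alloc_nid_done(sbi, nid);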
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}

int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		unsigned int batch = SHRINK_NID_BATCH_SIZE;

		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
	}

	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}

int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	if (page) {
		memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
				VALID_XATTR_BLOCK_SIZE);
		set_page_dirty(xpage);
	}
	f2fs_put_page(xpage, 1);

	return 0;
}

int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni, false);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		memalloc_retry_wait(GFP_NOFS);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_crtime_nsec)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}

int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = BLKS_PER_SEG(sbi);
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = bio_max_segs(last_offset - i);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

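		/*
		 * Drop the meta pages read ahead above; once the summary
		 * entries have been copied out they are no longer needed.
		 */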
		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}

static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(sbi, nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in journal has not been used after last
		 * checkpoint, we should remove it from available nids,
		 * since we will add it again later.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}

static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
							unsigned int valid)
{
	if (valid == 0) {
		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
}

static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
			struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
			valid++;
	}

	__update_nat_bits(nm_i, nat_index, valid);
}

void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs;

	f2fs_down_read(&nm_i->nat_tree_lock);

	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
		unsigned int valid = 0, nid_ofs = 0;

		/* nid zero is reserved and must never be used, so count it as in use */
		if (unlikely(nat_ofs == 0)) {
			valid = 1;
			nid_ofs = 1;
		}

		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
			if (!test_bit_le(nid_ofs,
					nm_i->free_nid_bitmap[nat_ofs]))
				valid++;
		}

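		/*
		 * A clear bit in free_nid_bitmap means the nid is not known
		 * to be free, so 'valid' counts the in-use nids of this block.
		 */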
		__update_nat_bits(nm_i, nat_ofs, valid);
	}

	f2fs_up_read(&nm_i->nat_tree_lock);
}

static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if ((cpc->reason & CP_UMOUNT) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}

/*
 * This function is called during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[NAT_VEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (cpc->reason & CP_UMOUNT) {
		f2fs_down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		f2fs_up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	f2fs_down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from the journal and merge them
	 * into the nat entry set.
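	 *
	 * Note that during umount (CP_UMOUNT), __flush_nat_entry_set()
	 * flushes to NAT pages rather than to the journal, so that
	 * update_nat_bits() can rebuild the full/empty nat_bits.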
	 */
	if (cpc->reason & CP_UMOUNT ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	f2fs_up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}

static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			F2FS_BLK_TO_BYTES(nm_i->nat_bits_blocks), GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return 0;

	nat_bits_addr = __start_cp_addr(sbi) + BLKS_PER_SEG(sbi) -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + F2FS_BLK_TO_BYTES(i),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
		return 0;
	}

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}

static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

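	/* the NAT area geometry comes straight from the on-disk superblock */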
	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
	nm_i->max_rf_node_blocks = DEF_RF_NODE_BLOCKS;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_f2fs_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}

int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}

void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	void *vec[NAT_VEC_SIZE];
	struct nat_entry **natvec = (struct nat_entry **)vec;
	struct nat_entry_set **setvec = (struct nat_entry_set **)vec;
	nid_t nid = 0;
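	/*
	 * One scratch vector is aliased for both gang lookups below:
	 * nat entries first, then nat entry sets.
	 */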
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	f2fs_down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NAT_VEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	memset(vec, 0, sizeof(void *) * NAT_VEC_SIZE);
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, NAT_VEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is nonzero only if a cp_error occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	f2fs_up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}