1 // SPDX-License-Identifier: GPL-2.0 2 #include "bcachefs.h" 3 #include "bbpos.h" 4 #include "alloc_background.h" 5 #include "backpointers.h" 6 #include "bkey_buf.h" 7 #include "btree_cache.h" 8 #include "btree_update.h" 9 #include "btree_update_interior.h" 10 #include "btree_write_buffer.h" 11 #include "checksum.h" 12 #include "disk_accounting.h" 13 #include "error.h" 14 15 #include <linux/mm.h> 16 17 int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k, 18 struct bkey_validate_context from) 19 { 20 struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k); 21 int ret = 0; 22 23 bkey_fsck_err_on(bp.v->level > BTREE_MAX_DEPTH, 24 c, backpointer_level_bad, 25 "backpointer level bad: %u >= %u", 26 bp.v->level, BTREE_MAX_DEPTH); 27 28 bkey_fsck_err_on(bp.k->p.inode == BCH_SB_MEMBER_INVALID, 29 c, backpointer_dev_bad, 30 "backpointer for BCH_SB_MEMBER_INVALID"); 31 fsck_err: 32 return ret; 33 } 34 35 void bch2_backpointer_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k) 36 { 37 struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k); 38 39 rcu_read_lock(); 40 struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode); 41 if (ca) { 42 u32 bucket_offset; 43 struct bpos bucket = bp_pos_to_bucket_and_offset(ca, bp.k->p, &bucket_offset); 44 rcu_read_unlock(); 45 prt_printf(out, "bucket=%llu:%llu:%u ", bucket.inode, bucket.offset, bucket_offset); 46 } else { 47 rcu_read_unlock(); 48 prt_printf(out, "sector=%llu:%llu ", bp.k->p.inode, bp.k->p.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT); 49 } 50 51 bch2_btree_id_level_to_text(out, bp.v->btree_id, bp.v->level); 52 prt_printf(out, " suboffset=%u len=%u gen=%u pos=", 53 (u32) bp.k->p.offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT), 54 bp.v->bucket_len, 55 bp.v->bucket_gen); 56 bch2_bpos_to_text(out, bp.v->pos); 57 } 58 59 void bch2_backpointer_swab(struct bkey_s k) 60 { 61 struct bkey_s_backpointer bp = bkey_s_to_backpointer(k); 62 63 bp.v->bucket_len = swab32(bp.v->bucket_len); 64 bch2_bpos_swab(&bp.v->pos); 65 } 66 67 static bool extent_matches_bp(struct bch_fs *c, 68 enum btree_id btree_id, unsigned level, 69 struct bkey_s_c k, 70 struct bkey_s_c_backpointer bp) 71 { 72 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 73 const union bch_extent_entry *entry; 74 struct extent_ptr_decoded p; 75 76 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 77 struct bkey_i_backpointer bp2; 78 bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bp2); 79 80 if (bpos_eq(bp.k->p, bp2.k.p) && 81 !memcmp(bp.v, &bp2.v, sizeof(bp2.v))) 82 return true; 83 } 84 85 return false; 86 } 87 88 static noinline int backpointer_mod_err(struct btree_trans *trans, 89 struct bkey_s_c orig_k, 90 struct bkey_i_backpointer *new_bp, 91 struct bkey_s_c found_bp, 92 bool insert) 93 { 94 struct bch_fs *c = trans->c; 95 struct printbuf buf = PRINTBUF; 96 97 if (insert) { 98 prt_printf(&buf, "existing backpointer found when inserting "); 99 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new_bp->k_i)); 100 prt_newline(&buf); 101 printbuf_indent_add(&buf, 2); 102 103 prt_printf(&buf, "found "); 104 bch2_bkey_val_to_text(&buf, c, found_bp); 105 prt_newline(&buf); 106 107 prt_printf(&buf, "for "); 108 bch2_bkey_val_to_text(&buf, c, orig_k); 109 110 bch_err(c, "%s", buf.buf); 111 } else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) { 112 prt_printf(&buf, "backpointer not found when deleting\n"); 113 printbuf_indent_add(&buf, 2); 114 115 prt_printf(&buf, "searching for "); 116 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new_bp->k_i)); 117 prt_newline(&buf); 118 119 prt_printf(&buf, "got "); 120 bch2_bkey_val_to_text(&buf, c, found_bp); 121 prt_newline(&buf); 122 123 prt_printf(&buf, "for "); 124 bch2_bkey_val_to_text(&buf, c, orig_k); 125 126 bch_err(c, "%s", buf.buf); 127 } 128 129 printbuf_exit(&buf); 130 131 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) { 132 return bch2_inconsistent_error(c) ? BCH_ERR_erofs_unfixed_errors : 0; 133 } else { 134 return 0; 135 } 136 } 137 138 int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans, 139 struct bkey_s_c orig_k, 140 struct bkey_i_backpointer *bp, 141 bool insert) 142 { 143 struct btree_iter bp_iter; 144 struct bkey_s_c k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, 145 bp->k.p, 146 BTREE_ITER_intent| 147 BTREE_ITER_slots| 148 BTREE_ITER_with_updates); 149 int ret = bkey_err(k); 150 if (ret) 151 return ret; 152 153 if (insert 154 ? k.k->type 155 : (k.k->type != KEY_TYPE_backpointer || 156 memcmp(bkey_s_c_to_backpointer(k).v, &bp->v, sizeof(bp->v)))) { 157 ret = backpointer_mod_err(trans, orig_k, bp, k, insert); 158 if (ret) 159 goto err; 160 } 161 162 if (!insert) { 163 bp->k.type = KEY_TYPE_deleted; 164 set_bkey_val_u64s(&bp->k, 0); 165 } 166 167 ret = bch2_trans_update(trans, &bp_iter, &bp->k_i, 0); 168 err: 169 bch2_trans_iter_exit(trans, &bp_iter); 170 return ret; 171 } 172 173 static int bch2_backpointer_del(struct btree_trans *trans, struct bpos pos) 174 { 175 return (likely(!bch2_backpointers_no_use_write_buffer) 176 ? bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, pos) 177 : bch2_btree_delete(trans, BTREE_ID_backpointers, pos, 0)) ?: 178 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); 179 } 180 181 static inline int bch2_backpointers_maybe_flush(struct btree_trans *trans, 182 struct bkey_s_c visiting_k, 183 struct bkey_buf *last_flushed) 184 { 185 return likely(!bch2_backpointers_no_use_write_buffer) 186 ? bch2_btree_write_buffer_maybe_flush(trans, visiting_k, last_flushed) 187 : 0; 188 } 189 190 static int backpointer_target_not_found(struct btree_trans *trans, 191 struct bkey_s_c_backpointer bp, 192 struct bkey_s_c target_k, 193 struct bkey_buf *last_flushed) 194 { 195 struct bch_fs *c = trans->c; 196 struct printbuf buf = PRINTBUF; 197 int ret = 0; 198 199 /* 200 * If we're using the btree write buffer, the backpointer we were 201 * looking at may have already been deleted - failure to find what it 202 * pointed to is not an error: 203 */ 204 ret = last_flushed 205 ? bch2_backpointers_maybe_flush(trans, bp.s_c, last_flushed) 206 : 0; 207 if (ret) 208 return ret; 209 210 prt_printf(&buf, "backpointer doesn't match %s it points to:\n ", 211 bp.v->level ? "btree node" : "extent"); 212 bch2_bkey_val_to_text(&buf, c, bp.s_c); 213 214 prt_printf(&buf, "\n "); 215 bch2_bkey_val_to_text(&buf, c, target_k); 216 217 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(target_k); 218 const union bch_extent_entry *entry; 219 struct extent_ptr_decoded p; 220 bkey_for_each_ptr_decode(target_k.k, ptrs, p, entry) 221 if (p.ptr.dev == bp.k->p.inode) { 222 prt_printf(&buf, "\n "); 223 struct bkey_i_backpointer bp2; 224 bch2_extent_ptr_to_bp(c, bp.v->btree_id, bp.v->level, target_k, p, entry, &bp2); 225 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp2.k_i)); 226 } 227 228 if (fsck_err(trans, backpointer_to_missing_ptr, 229 "%s", buf.buf)) 230 ret = bch2_backpointer_del(trans, bp.k->p); 231 fsck_err: 232 printbuf_exit(&buf); 233 return ret; 234 } 235 236 struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans, 237 struct bkey_s_c_backpointer bp, 238 struct btree_iter *iter, 239 unsigned iter_flags, 240 struct bkey_buf *last_flushed) 241 { 242 struct bch_fs *c = trans->c; 243 244 if (unlikely(bp.v->btree_id >= btree_id_nr_alive(c))) 245 return bkey_s_c_null; 246 247 bch2_trans_node_iter_init(trans, iter, 248 bp.v->btree_id, 249 bp.v->pos, 250 0, 251 bp.v->level, 252 iter_flags); 253 struct bkey_s_c k = bch2_btree_iter_peek_slot(iter); 254 if (bkey_err(k)) { 255 bch2_trans_iter_exit(trans, iter); 256 return k; 257 } 258 259 if (k.k && 260 extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp)) 261 return k; 262 263 bch2_trans_iter_exit(trans, iter); 264 265 if (!bp.v->level) { 266 int ret = backpointer_target_not_found(trans, bp, k, last_flushed); 267 return ret ? bkey_s_c_err(ret) : bkey_s_c_null; 268 } else { 269 struct btree *b = bch2_backpointer_get_node(trans, bp, iter, last_flushed); 270 if (b == ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node)) 271 return bkey_s_c_null; 272 if (IS_ERR_OR_NULL(b)) 273 return ((struct bkey_s_c) { .k = ERR_CAST(b) }); 274 275 return bkey_i_to_s_c(&b->key); 276 } 277 } 278 279 struct btree *bch2_backpointer_get_node(struct btree_trans *trans, 280 struct bkey_s_c_backpointer bp, 281 struct btree_iter *iter, 282 struct bkey_buf *last_flushed) 283 { 284 struct bch_fs *c = trans->c; 285 286 BUG_ON(!bp.v->level); 287 288 bch2_trans_node_iter_init(trans, iter, 289 bp.v->btree_id, 290 bp.v->pos, 291 0, 292 bp.v->level - 1, 293 0); 294 struct btree *b = bch2_btree_iter_peek_node(iter); 295 if (IS_ERR_OR_NULL(b)) 296 goto err; 297 298 BUG_ON(b->c.level != bp.v->level - 1); 299 300 if (extent_matches_bp(c, bp.v->btree_id, bp.v->level, 301 bkey_i_to_s_c(&b->key), bp)) 302 return b; 303 304 if (btree_node_will_make_reachable(b)) { 305 b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node); 306 } else { 307 int ret = backpointer_target_not_found(trans, bp, bkey_i_to_s_c(&b->key), last_flushed); 308 b = ret ? ERR_PTR(ret) : NULL; 309 } 310 err: 311 bch2_trans_iter_exit(trans, iter); 312 return b; 313 } 314 315 static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, struct bkey_s_c k, 316 struct bkey_buf *last_flushed) 317 { 318 if (k.k->type != KEY_TYPE_backpointer) 319 return 0; 320 321 struct bch_fs *c = trans->c; 322 struct btree_iter alloc_iter = { NULL }; 323 struct bkey_s_c alloc_k; 324 struct printbuf buf = PRINTBUF; 325 int ret = 0; 326 327 struct bpos bucket; 328 if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) { 329 ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); 330 if (ret) 331 goto out; 332 333 if (fsck_err(trans, backpointer_to_missing_device, 334 "backpointer for missing device:\n%s", 335 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 336 ret = bch2_backpointer_del(trans, k.k->p); 337 goto out; 338 } 339 340 alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, bucket, 0); 341 ret = bkey_err(alloc_k); 342 if (ret) 343 goto out; 344 345 if (alloc_k.k->type != KEY_TYPE_alloc_v4) { 346 ret = bch2_backpointers_maybe_flush(trans, k, last_flushed); 347 if (ret) 348 goto out; 349 350 if (fsck_err(trans, backpointer_to_missing_alloc, 351 "backpointer for nonexistent alloc key: %llu:%llu:0\n%s", 352 alloc_iter.pos.inode, alloc_iter.pos.offset, 353 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) 354 ret = bch2_backpointer_del(trans, k.k->p); 355 } 356 out: 357 fsck_err: 358 bch2_trans_iter_exit(trans, &alloc_iter); 359 printbuf_exit(&buf); 360 return ret; 361 } 362 363 /* verify that every backpointer has a corresponding alloc key */ 364 int bch2_check_btree_backpointers(struct bch_fs *c) 365 { 366 struct bkey_buf last_flushed; 367 bch2_bkey_buf_init(&last_flushed); 368 bkey_init(&last_flushed.k->k); 369 370 int ret = bch2_trans_run(c, 371 for_each_btree_key_commit(trans, iter, 372 BTREE_ID_backpointers, POS_MIN, 0, k, 373 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, 374 bch2_check_backpointer_has_valid_bucket(trans, k, &last_flushed))); 375 376 bch2_bkey_buf_exit(&last_flushed, c); 377 bch_err_fn(c, ret); 378 return ret; 379 } 380 381 struct extents_to_bp_state { 382 struct bpos bp_start; 383 struct bpos bp_end; 384 struct bkey_buf last_flushed; 385 }; 386 387 static int drop_dev_and_update(struct btree_trans *trans, enum btree_id btree, 388 struct bkey_s_c extent, unsigned dev) 389 { 390 struct bkey_i *n = bch2_bkey_make_mut_noupdate(trans, extent); 391 int ret = PTR_ERR_OR_ZERO(n); 392 if (ret) 393 return ret; 394 395 bch2_bkey_drop_device(bkey_i_to_s(n), dev); 396 return bch2_btree_insert_trans(trans, btree, n, 0); 397 } 398 399 static int check_extent_checksum(struct btree_trans *trans, 400 enum btree_id btree, struct bkey_s_c extent, 401 enum btree_id o_btree, struct bkey_s_c extent2, unsigned dev) 402 { 403 struct bch_fs *c = trans->c; 404 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(extent); 405 const union bch_extent_entry *entry; 406 struct extent_ptr_decoded p; 407 struct printbuf buf = PRINTBUF; 408 void *data_buf = NULL; 409 struct bio *bio = NULL; 410 size_t bytes; 411 int ret = 0; 412 413 if (bkey_is_btree_ptr(extent.k)) 414 return false; 415 416 bkey_for_each_ptr_decode(extent.k, ptrs, p, entry) 417 if (p.ptr.dev == dev) 418 goto found; 419 BUG(); 420 found: 421 if (!p.crc.csum_type) 422 return false; 423 424 bytes = p.crc.compressed_size << 9; 425 426 struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ); 427 if (!ca) 428 return false; 429 430 data_buf = kvmalloc(bytes, GFP_KERNEL); 431 if (!data_buf) { 432 ret = -ENOMEM; 433 goto err; 434 } 435 436 bio = bio_alloc(ca->disk_sb.bdev, buf_pages(data_buf, bytes), REQ_OP_READ, GFP_KERNEL); 437 bio->bi_iter.bi_sector = p.ptr.offset; 438 bch2_bio_map(bio, data_buf, bytes); 439 ret = submit_bio_wait(bio); 440 if (ret) 441 goto err; 442 443 prt_str(&buf, "extents pointing to same space, but first extent checksum bad:"); 444 prt_printf(&buf, "\n "); 445 bch2_btree_id_to_text(&buf, btree); 446 prt_str(&buf, " "); 447 bch2_bkey_val_to_text(&buf, c, extent); 448 prt_printf(&buf, "\n "); 449 bch2_btree_id_to_text(&buf, o_btree); 450 prt_str(&buf, " "); 451 bch2_bkey_val_to_text(&buf, c, extent2); 452 453 struct nonce nonce = extent_nonce(extent.k->bversion, p.crc); 454 struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes); 455 if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum), 456 trans, dup_backpointer_to_bad_csum_extent, 457 "%s", buf.buf)) 458 ret = drop_dev_and_update(trans, btree, extent, dev) ?: 1; 459 fsck_err: 460 err: 461 if (bio) 462 bio_put(bio); 463 kvfree(data_buf); 464 percpu_ref_put(&ca->io_ref); 465 printbuf_exit(&buf); 466 return ret; 467 } 468 469 static int check_bp_exists(struct btree_trans *trans, 470 struct extents_to_bp_state *s, 471 struct bkey_i_backpointer *bp, 472 struct bkey_s_c orig_k) 473 { 474 struct bch_fs *c = trans->c; 475 struct btree_iter other_extent_iter = {}; 476 struct printbuf buf = PRINTBUF; 477 478 if (bpos_lt(bp->k.p, s->bp_start) || 479 bpos_gt(bp->k.p, s->bp_end)) 480 return 0; 481 482 struct btree_iter bp_iter; 483 struct bkey_s_c bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers, bp->k.p, 0); 484 int ret = bkey_err(bp_k); 485 if (ret) 486 goto err; 487 488 if (bp_k.k->type != KEY_TYPE_backpointer || 489 memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp->v, sizeof(bp->v))) { 490 ret = bch2_btree_write_buffer_maybe_flush(trans, orig_k, &s->last_flushed); 491 if (ret) 492 goto err; 493 494 goto check_existing_bp; 495 } 496 out: 497 err: 498 fsck_err: 499 bch2_trans_iter_exit(trans, &other_extent_iter); 500 bch2_trans_iter_exit(trans, &bp_iter); 501 printbuf_exit(&buf); 502 return ret; 503 check_existing_bp: 504 /* Do we have a backpointer for a different extent? */ 505 if (bp_k.k->type != KEY_TYPE_backpointer) 506 goto missing; 507 508 struct bkey_s_c_backpointer other_bp = bkey_s_c_to_backpointer(bp_k); 509 510 struct bkey_s_c other_extent = 511 bch2_backpointer_get_key(trans, other_bp, &other_extent_iter, 0, NULL); 512 ret = bkey_err(other_extent); 513 if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) 514 ret = 0; 515 if (ret) 516 goto err; 517 518 if (!other_extent.k) 519 goto missing; 520 521 rcu_read_lock(); 522 struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp->k.p.inode); 523 if (ca) { 524 struct bkey_ptrs_c other_extent_ptrs = bch2_bkey_ptrs_c(other_extent); 525 bkey_for_each_ptr(other_extent_ptrs, ptr) 526 if (ptr->dev == bp->k.p.inode && 527 dev_ptr_stale_rcu(ca, ptr)) { 528 ret = drop_dev_and_update(trans, other_bp.v->btree_id, 529 other_extent, bp->k.p.inode); 530 if (ret) 531 goto err; 532 goto out; 533 } 534 } 535 rcu_read_unlock(); 536 537 if (bch2_extents_match(orig_k, other_extent)) { 538 printbuf_reset(&buf); 539 prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n "); 540 bch2_bkey_val_to_text(&buf, c, orig_k); 541 prt_str(&buf, "\n "); 542 bch2_bkey_val_to_text(&buf, c, other_extent); 543 bch_err(c, "%s", buf.buf); 544 545 if (other_extent.k->size <= orig_k.k->size) { 546 ret = drop_dev_and_update(trans, other_bp.v->btree_id, 547 other_extent, bp->k.p.inode); 548 if (ret) 549 goto err; 550 goto out; 551 } else { 552 ret = drop_dev_and_update(trans, bp->v.btree_id, orig_k, bp->k.p.inode); 553 if (ret) 554 goto err; 555 goto missing; 556 } 557 } 558 559 ret = check_extent_checksum(trans, 560 other_bp.v->btree_id, other_extent, 561 bp->v.btree_id, orig_k, 562 bp->k.p.inode); 563 if (ret < 0) 564 goto err; 565 if (ret) { 566 ret = 0; 567 goto missing; 568 } 569 570 ret = check_extent_checksum(trans, bp->v.btree_id, orig_k, 571 other_bp.v->btree_id, other_extent, bp->k.p.inode); 572 if (ret < 0) 573 goto err; 574 if (ret) { 575 ret = 0; 576 goto out; 577 } 578 579 printbuf_reset(&buf); 580 prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bp->k.p.inode); 581 bch2_bkey_val_to_text(&buf, c, orig_k); 582 prt_str(&buf, "\n "); 583 bch2_bkey_val_to_text(&buf, c, other_extent); 584 bch_err(c, "%s", buf.buf); 585 ret = -BCH_ERR_fsck_repair_unimplemented; 586 goto err; 587 missing: 588 printbuf_reset(&buf); 589 prt_str(&buf, "missing backpointer\n for: "); 590 bch2_bkey_val_to_text(&buf, c, orig_k); 591 prt_printf(&buf, "\n want: "); 592 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp->k_i)); 593 prt_printf(&buf, "\n got: "); 594 bch2_bkey_val_to_text(&buf, c, bp_k); 595 596 if (fsck_err(trans, ptr_to_missing_backpointer, "%s", buf.buf)) 597 ret = bch2_bucket_backpointer_mod(trans, orig_k, bp, true); 598 599 goto out; 600 } 601 602 static int check_extent_to_backpointers(struct btree_trans *trans, 603 struct extents_to_bp_state *s, 604 enum btree_id btree, unsigned level, 605 struct bkey_s_c k) 606 { 607 struct bch_fs *c = trans->c; 608 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k); 609 const union bch_extent_entry *entry; 610 struct extent_ptr_decoded p; 611 612 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) { 613 if (p.ptr.cached) 614 continue; 615 616 if (p.ptr.dev == BCH_SB_MEMBER_INVALID) 617 continue; 618 619 rcu_read_lock(); 620 struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev); 621 bool check = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_mismatches); 622 bool empty = ca && test_bit(PTR_BUCKET_NR(ca, &p.ptr), ca->bucket_backpointer_empty); 623 rcu_read_unlock(); 624 625 if (check || empty) { 626 struct bkey_i_backpointer bp; 627 bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bp); 628 629 int ret = check 630 ? check_bp_exists(trans, s, &bp, k) 631 : bch2_bucket_backpointer_mod(trans, k, &bp, true); 632 if (ret) 633 return ret; 634 } 635 } 636 637 return 0; 638 } 639 640 static int check_btree_root_to_backpointers(struct btree_trans *trans, 641 struct extents_to_bp_state *s, 642 enum btree_id btree_id, 643 int *level) 644 { 645 struct bch_fs *c = trans->c; 646 struct btree_iter iter; 647 struct btree *b; 648 struct bkey_s_c k; 649 int ret; 650 retry: 651 bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 652 0, bch2_btree_id_root(c, btree_id)->b->c.level, 0); 653 b = bch2_btree_iter_peek_node(&iter); 654 ret = PTR_ERR_OR_ZERO(b); 655 if (ret) 656 goto err; 657 658 if (b != btree_node_root(c, b)) { 659 bch2_trans_iter_exit(trans, &iter); 660 goto retry; 661 } 662 663 *level = b->c.level; 664 665 k = bkey_i_to_s_c(&b->key); 666 ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k); 667 err: 668 bch2_trans_iter_exit(trans, &iter); 669 return ret; 670 } 671 672 static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp) 673 { 674 return (struct bbpos) { 675 .btree = bp.btree_id, 676 .pos = bp.pos, 677 }; 678 } 679 680 static u64 mem_may_pin_bytes(struct bch_fs *c) 681 { 682 struct sysinfo i; 683 si_meminfo(&i); 684 685 u64 mem_bytes = i.totalram * i.mem_unit; 686 return div_u64(mem_bytes * c->opts.fsck_memory_usage_percent, 100); 687 } 688 689 static size_t btree_nodes_fit_in_ram(struct bch_fs *c) 690 { 691 return div_u64(mem_may_pin_bytes(c), c->opts.btree_node_size); 692 } 693 694 static int bch2_get_btree_in_memory_pos(struct btree_trans *trans, 695 u64 btree_leaf_mask, 696 u64 btree_interior_mask, 697 struct bbpos start, struct bbpos *end) 698 { 699 struct bch_fs *c = trans->c; 700 s64 mem_may_pin = mem_may_pin_bytes(c); 701 int ret = 0; 702 703 bch2_btree_cache_unpin(c); 704 705 btree_interior_mask |= btree_leaf_mask; 706 707 c->btree_cache.pinned_nodes_mask[0] = btree_leaf_mask; 708 c->btree_cache.pinned_nodes_mask[1] = btree_interior_mask; 709 c->btree_cache.pinned_nodes_start = start; 710 c->btree_cache.pinned_nodes_end = *end = BBPOS_MAX; 711 712 for (enum btree_id btree = start.btree; 713 btree < BTREE_ID_NR && !ret; 714 btree++) { 715 unsigned depth = (BIT_ULL(btree) & btree_leaf_mask) ? 0 : 1; 716 717 if (!(BIT_ULL(btree) & btree_leaf_mask) && 718 !(BIT_ULL(btree) & btree_interior_mask)) 719 continue; 720 721 ret = __for_each_btree_node(trans, iter, btree, 722 btree == start.btree ? start.pos : POS_MIN, 723 0, depth, BTREE_ITER_prefetch, b, ({ 724 mem_may_pin -= btree_buf_bytes(b); 725 if (mem_may_pin <= 0) { 726 c->btree_cache.pinned_nodes_end = *end = 727 BBPOS(btree, b->key.k.p); 728 break; 729 } 730 bch2_node_pin(c, b); 731 0; 732 })); 733 } 734 735 return ret; 736 } 737 738 struct progress_indicator_state { 739 unsigned long next_print; 740 u64 nodes_seen; 741 u64 nodes_total; 742 struct btree *last_node; 743 }; 744 745 static inline void progress_init(struct progress_indicator_state *s, 746 struct bch_fs *c, 747 u64 btree_id_mask) 748 { 749 memset(s, 0, sizeof(*s)); 750 751 s->next_print = jiffies + HZ * 10; 752 753 for (unsigned i = 0; i < BTREE_ID_NR; i++) { 754 if (!(btree_id_mask & BIT_ULL(i))) 755 continue; 756 757 struct disk_accounting_pos acc = { 758 .type = BCH_DISK_ACCOUNTING_btree, 759 .btree.id = i, 760 }; 761 762 u64 v; 763 bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1); 764 s->nodes_total += div64_ul(v, btree_sectors(c)); 765 } 766 } 767 768 static inline bool progress_update_p(struct progress_indicator_state *s) 769 { 770 bool ret = time_after_eq(jiffies, s->next_print); 771 772 if (ret) 773 s->next_print = jiffies + HZ * 10; 774 return ret; 775 } 776 777 static void progress_update_iter(struct btree_trans *trans, 778 struct progress_indicator_state *s, 779 struct btree_iter *iter, 780 const char *msg) 781 { 782 struct bch_fs *c = trans->c; 783 struct btree *b = path_l(btree_iter_path(trans, iter))->b; 784 785 s->nodes_seen += b != s->last_node; 786 s->last_node = b; 787 788 if (progress_update_p(s)) { 789 struct printbuf buf = PRINTBUF; 790 unsigned percent = s->nodes_total 791 ? div64_u64(s->nodes_seen * 100, s->nodes_total) 792 : 0; 793 794 prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ", 795 msg, percent, s->nodes_seen, s->nodes_total); 796 bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos)); 797 798 bch_info(c, "%s", buf.buf); 799 printbuf_exit(&buf); 800 } 801 } 802 803 static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans, 804 struct extents_to_bp_state *s) 805 { 806 struct bch_fs *c = trans->c; 807 struct progress_indicator_state progress; 808 int ret = 0; 809 810 progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_extents)|BIT_ULL(BTREE_ID_reflink)); 811 812 for (enum btree_id btree_id = 0; 813 btree_id < btree_id_nr_alive(c); 814 btree_id++) { 815 int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1; 816 817 ret = commit_do(trans, NULL, NULL, 818 BCH_TRANS_COMMIT_no_enospc, 819 check_btree_root_to_backpointers(trans, s, btree_id, &level)); 820 if (ret) 821 return ret; 822 823 while (level >= depth) { 824 struct btree_iter iter; 825 bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, level, 826 BTREE_ITER_prefetch); 827 828 ret = for_each_btree_key_continue(trans, iter, 0, k, ({ 829 progress_update_iter(trans, &progress, &iter, "extents_to_backpointers"); 830 check_extent_to_backpointers(trans, s, btree_id, level, k) ?: 831 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); 832 })); 833 if (ret) 834 return ret; 835 836 --level; 837 } 838 } 839 840 return 0; 841 } 842 843 enum alloc_sector_counter { 844 ALLOC_dirty, 845 ALLOC_cached, 846 ALLOC_stripe, 847 ALLOC_SECTORS_NR 848 }; 849 850 static enum alloc_sector_counter data_type_to_alloc_counter(enum bch_data_type t) 851 { 852 switch (t) { 853 case BCH_DATA_btree: 854 case BCH_DATA_user: 855 return ALLOC_dirty; 856 case BCH_DATA_cached: 857 return ALLOC_cached; 858 case BCH_DATA_stripe: 859 return ALLOC_stripe; 860 default: 861 BUG(); 862 } 863 } 864 865 static int check_bucket_backpointers_to_extents(struct btree_trans *, struct bch_dev *, struct bpos); 866 867 static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct bkey_s_c alloc_k, 868 struct bkey_buf *last_flushed) 869 { 870 struct bch_fs *c = trans->c; 871 struct bch_alloc_v4 a_convert; 872 const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert); 873 bool need_commit = false; 874 875 if (a->data_type == BCH_DATA_sb || 876 a->data_type == BCH_DATA_journal || 877 a->data_type == BCH_DATA_parity) 878 return 0; 879 880 u32 sectors[ALLOC_SECTORS_NR]; 881 memset(sectors, 0, sizeof(sectors)); 882 883 struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(trans->c, alloc_k.k->p); 884 if (!ca) 885 return 0; 886 887 struct btree_iter iter; 888 struct bkey_s_c bp_k; 889 int ret = 0; 890 for_each_btree_key_max_norestart(trans, iter, BTREE_ID_backpointers, 891 bucket_pos_to_bp_start(ca, alloc_k.k->p), 892 bucket_pos_to_bp_end(ca, alloc_k.k->p), 0, bp_k, ret) { 893 if (bp_k.k->type != KEY_TYPE_backpointer) 894 continue; 895 896 struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k); 897 898 if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen && 899 (bp.v->bucket_gen != a->gen || 900 bp.v->pad)) { 901 ret = bch2_backpointer_del(trans, bp_k.k->p); 902 if (ret) 903 break; 904 905 need_commit = true; 906 continue; 907 } 908 909 if (bp.v->bucket_gen != a->gen) 910 continue; 911 912 sectors[data_type_to_alloc_counter(bp.v->data_type)] += bp.v->bucket_len; 913 }; 914 bch2_trans_iter_exit(trans, &iter); 915 if (ret) 916 goto err; 917 918 if (need_commit) { 919 ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc); 920 if (ret) 921 goto err; 922 } 923 924 /* Cached pointers don't have backpointers: */ 925 926 if (sectors[ALLOC_dirty] != a->dirty_sectors || 927 sectors[ALLOC_stripe] != a->stripe_sectors) { 928 if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_backpointer_bucket_gen) { 929 ret = bch2_backpointers_maybe_flush(trans, alloc_k, last_flushed); 930 if (ret) 931 goto err; 932 } 933 934 if (sectors[ALLOC_dirty] > a->dirty_sectors || 935 sectors[ALLOC_stripe] > a->stripe_sectors) { 936 ret = check_bucket_backpointers_to_extents(trans, ca, alloc_k.k->p) ?: 937 -BCH_ERR_transaction_restart_nested; 938 goto err; 939 } 940 941 if (!sectors[ALLOC_dirty] && 942 !sectors[ALLOC_stripe]) 943 __set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_empty); 944 else 945 __set_bit(alloc_k.k->p.offset, ca->bucket_backpointer_mismatches); 946 } 947 err: 948 bch2_dev_put(ca); 949 return ret; 950 } 951 952 static bool backpointer_node_has_missing(struct bch_fs *c, struct bkey_s_c k) 953 { 954 switch (k.k->type) { 955 case KEY_TYPE_btree_ptr_v2: { 956 bool ret = false; 957 958 rcu_read_lock(); 959 struct bpos pos = bkey_s_c_to_btree_ptr_v2(k).v->min_key; 960 while (pos.inode <= k.k->p.inode) { 961 if (pos.inode >= c->sb.nr_devices) 962 break; 963 964 struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode); 965 if (!ca) 966 goto next; 967 968 struct bpos bucket = bp_pos_to_bucket(ca, pos); 969 bucket.offset = find_next_bit(ca->bucket_backpointer_mismatches, 970 ca->mi.nbuckets, bucket.offset); 971 if (bucket.offset == ca->mi.nbuckets) 972 goto next; 973 974 ret = bpos_le(bucket_pos_to_bp_end(ca, bucket), k.k->p); 975 if (ret) 976 break; 977 next: 978 pos = SPOS(pos.inode + 1, 0, 0); 979 } 980 rcu_read_unlock(); 981 982 return ret; 983 } 984 case KEY_TYPE_btree_ptr: 985 return true; 986 default: 987 return false; 988 } 989 } 990 991 static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k, 992 enum btree_id btree, unsigned level) 993 { 994 struct btree_iter iter; 995 bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, 0, level, 0); 996 struct btree *b = bch2_btree_iter_peek_node(&iter); 997 int ret = PTR_ERR_OR_ZERO(b); 998 if (ret) 999 goto err; 1000 1001 if (b) 1002 bch2_node_pin(trans->c, b); 1003 err: 1004 bch2_trans_iter_exit(trans, &iter); 1005 return ret; 1006 } 1007 1008 static int bch2_pin_backpointer_nodes_with_missing(struct btree_trans *trans, 1009 struct bpos start, struct bpos *end) 1010 { 1011 struct bch_fs *c = trans->c; 1012 int ret = 0; 1013 1014 struct bkey_buf tmp; 1015 bch2_bkey_buf_init(&tmp); 1016 1017 bch2_btree_cache_unpin(c); 1018 1019 *end = SPOS_MAX; 1020 1021 s64 mem_may_pin = mem_may_pin_bytes(c); 1022 struct btree_iter iter; 1023 bch2_trans_node_iter_init(trans, &iter, BTREE_ID_backpointers, start, 1024 0, 1, BTREE_ITER_prefetch); 1025 ret = for_each_btree_key_continue(trans, iter, 0, k, ({ 1026 if (!backpointer_node_has_missing(c, k)) 1027 continue; 1028 1029 mem_may_pin -= c->opts.btree_node_size; 1030 if (mem_may_pin <= 0) 1031 break; 1032 1033 bch2_bkey_buf_reassemble(&tmp, c, k); 1034 struct btree_path *path = btree_iter_path(trans, &iter); 1035 1036 BUG_ON(path->level != 1); 1037 1038 bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id, path->level - 1); 1039 })); 1040 if (ret) 1041 return ret; 1042 1043 struct bpos pinned = SPOS_MAX; 1044 mem_may_pin = mem_may_pin_bytes(c); 1045 bch2_trans_node_iter_init(trans, &iter, BTREE_ID_backpointers, start, 1046 0, 1, BTREE_ITER_prefetch); 1047 ret = for_each_btree_key_continue(trans, iter, 0, k, ({ 1048 if (!backpointer_node_has_missing(c, k)) 1049 continue; 1050 1051 mem_may_pin -= c->opts.btree_node_size; 1052 if (mem_may_pin <= 0) { 1053 *end = pinned; 1054 break; 1055 } 1056 1057 bch2_bkey_buf_reassemble(&tmp, c, k); 1058 struct btree_path *path = btree_iter_path(trans, &iter); 1059 1060 BUG_ON(path->level != 1); 1061 1062 int ret2 = btree_node_get_and_pin(trans, tmp.k, path->btree_id, path->level - 1); 1063 1064 if (!ret2) 1065 pinned = tmp.k->k.p; 1066 1067 ret; 1068 })); 1069 if (ret) 1070 return ret; 1071 1072 return ret; 1073 } 1074 1075 int bch2_check_extents_to_backpointers(struct bch_fs *c) 1076 { 1077 int ret = 0; 1078 1079 /* 1080 * Can't allow devices to come/go/resize while we have bucket bitmaps 1081 * allocated 1082 */ 1083 lockdep_assert_held(&c->state_lock); 1084 1085 for_each_member_device(c, ca) { 1086 BUG_ON(ca->bucket_backpointer_mismatches); 1087 ca->bucket_backpointer_mismatches = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets), 1088 sizeof(unsigned long), 1089 GFP_KERNEL); 1090 ca->bucket_backpointer_empty = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets), 1091 sizeof(unsigned long), 1092 GFP_KERNEL); 1093 if (!ca->bucket_backpointer_mismatches || 1094 !ca->bucket_backpointer_empty) { 1095 bch2_dev_put(ca); 1096 ret = -BCH_ERR_ENOMEM_backpointer_mismatches_bitmap; 1097 goto err_free_bitmaps; 1098 } 1099 } 1100 1101 struct btree_trans *trans = bch2_trans_get(c); 1102 struct extents_to_bp_state s = { .bp_start = POS_MIN }; 1103 1104 bch2_bkey_buf_init(&s.last_flushed); 1105 bkey_init(&s.last_flushed.k->k); 1106 1107 ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, 1108 POS_MIN, BTREE_ITER_prefetch, k, ({ 1109 check_bucket_backpointer_mismatch(trans, k, &s.last_flushed); 1110 })); 1111 if (ret) 1112 goto err; 1113 1114 u64 nr_buckets = 0, nr_mismatches = 0, nr_empty = 0; 1115 for_each_member_device(c, ca) { 1116 nr_buckets += ca->mi.nbuckets; 1117 nr_mismatches += bitmap_weight(ca->bucket_backpointer_mismatches, ca->mi.nbuckets); 1118 nr_empty += bitmap_weight(ca->bucket_backpointer_empty, ca->mi.nbuckets); 1119 } 1120 1121 if (!nr_mismatches && !nr_empty) 1122 goto err; 1123 1124 bch_info(c, "scanning for missing backpointers in %llu/%llu buckets", 1125 nr_mismatches + nr_empty, nr_buckets); 1126 1127 while (1) { 1128 ret = bch2_pin_backpointer_nodes_with_missing(trans, s.bp_start, &s.bp_end); 1129 if (ret) 1130 break; 1131 1132 if ( bpos_eq(s.bp_start, POS_MIN) && 1133 !bpos_eq(s.bp_end, SPOS_MAX)) 1134 bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass", 1135 __func__, btree_nodes_fit_in_ram(c)); 1136 1137 if (!bpos_eq(s.bp_start, POS_MIN) || 1138 !bpos_eq(s.bp_end, SPOS_MAX)) { 1139 struct printbuf buf = PRINTBUF; 1140 1141 prt_str(&buf, "check_extents_to_backpointers(): "); 1142 bch2_bpos_to_text(&buf, s.bp_start); 1143 prt_str(&buf, "-"); 1144 bch2_bpos_to_text(&buf, s.bp_end); 1145 1146 bch_verbose(c, "%s", buf.buf); 1147 printbuf_exit(&buf); 1148 } 1149 1150 ret = bch2_check_extents_to_backpointers_pass(trans, &s); 1151 if (ret || bpos_eq(s.bp_end, SPOS_MAX)) 1152 break; 1153 1154 s.bp_start = bpos_successor(s.bp_end); 1155 } 1156 err: 1157 bch2_trans_put(trans); 1158 bch2_bkey_buf_exit(&s.last_flushed, c); 1159 bch2_btree_cache_unpin(c); 1160 err_free_bitmaps: 1161 for_each_member_device(c, ca) { 1162 kvfree(ca->bucket_backpointer_empty); 1163 ca->bucket_backpointer_empty = NULL; 1164 kvfree(ca->bucket_backpointer_mismatches); 1165 ca->bucket_backpointer_mismatches = NULL; 1166 } 1167 1168 bch_err_fn(c, ret); 1169 return ret; 1170 } 1171 1172 static int check_one_backpointer(struct btree_trans *trans, 1173 struct bbpos start, 1174 struct bbpos end, 1175 struct bkey_s_c bp_k, 1176 struct bkey_buf *last_flushed) 1177 { 1178 if (bp_k.k->type != KEY_TYPE_backpointer) 1179 return 0; 1180 1181 struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k); 1182 struct bbpos pos = bp_to_bbpos(*bp.v); 1183 1184 if (bbpos_cmp(pos, start) < 0 || 1185 bbpos_cmp(pos, end) > 0) 1186 return 0; 1187 1188 struct btree_iter iter; 1189 struct bkey_s_c k = bch2_backpointer_get_key(trans, bp, &iter, 0, last_flushed); 1190 int ret = bkey_err(k); 1191 if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node) 1192 return 0; 1193 if (ret) 1194 return ret; 1195 1196 bch2_trans_iter_exit(trans, &iter); 1197 return ret; 1198 } 1199 1200 static int check_bucket_backpointers_to_extents(struct btree_trans *trans, 1201 struct bch_dev *ca, struct bpos bucket) 1202 { 1203 u32 restart_count = trans->restart_count; 1204 struct bkey_buf last_flushed; 1205 bch2_bkey_buf_init(&last_flushed); 1206 bkey_init(&last_flushed.k->k); 1207 1208 int ret = for_each_btree_key_max(trans, iter, BTREE_ID_backpointers, 1209 bucket_pos_to_bp_start(ca, bucket), 1210 bucket_pos_to_bp_end(ca, bucket), 1211 0, k, 1212 check_one_backpointer(trans, BBPOS_MIN, BBPOS_MAX, k, &last_flushed) 1213 ); 1214 1215 bch2_bkey_buf_exit(&last_flushed, trans->c); 1216 return ret ?: trans_was_restarted(trans, restart_count); 1217 } 1218 1219 static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans, 1220 struct bbpos start, 1221 struct bbpos end) 1222 { 1223 struct bch_fs *c = trans->c; 1224 struct bkey_buf last_flushed; 1225 struct progress_indicator_state progress; 1226 1227 bch2_bkey_buf_init(&last_flushed); 1228 bkey_init(&last_flushed.k->k); 1229 progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers)); 1230 1231 int ret = for_each_btree_key(trans, iter, BTREE_ID_backpointers, 1232 POS_MIN, BTREE_ITER_prefetch, k, ({ 1233 progress_update_iter(trans, &progress, &iter, "backpointers_to_extents"); 1234 check_one_backpointer(trans, start, end, k, &last_flushed); 1235 })); 1236 1237 bch2_bkey_buf_exit(&last_flushed, c); 1238 return ret; 1239 } 1240 1241 int bch2_check_backpointers_to_extents(struct bch_fs *c) 1242 { 1243 struct btree_trans *trans = bch2_trans_get(c); 1244 struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end; 1245 int ret; 1246 1247 while (1) { 1248 ret = bch2_get_btree_in_memory_pos(trans, 1249 BIT_ULL(BTREE_ID_extents)| 1250 BIT_ULL(BTREE_ID_reflink), 1251 ~0, 1252 start, &end); 1253 if (ret) 1254 break; 1255 1256 if (!bbpos_cmp(start, BBPOS_MIN) && 1257 bbpos_cmp(end, BBPOS_MAX)) 1258 bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass", 1259 __func__, btree_nodes_fit_in_ram(c)); 1260 1261 if (bbpos_cmp(start, BBPOS_MIN) || 1262 bbpos_cmp(end, BBPOS_MAX)) { 1263 struct printbuf buf = PRINTBUF; 1264 1265 prt_str(&buf, "check_backpointers_to_extents(): "); 1266 bch2_bbpos_to_text(&buf, start); 1267 prt_str(&buf, "-"); 1268 bch2_bbpos_to_text(&buf, end); 1269 1270 bch_verbose(c, "%s", buf.buf); 1271 printbuf_exit(&buf); 1272 } 1273 1274 ret = bch2_check_backpointers_to_extents_pass(trans, start, end); 1275 if (ret || !bbpos_cmp(end, BBPOS_MAX)) 1276 break; 1277 1278 start = bbpos_successor(end); 1279 } 1280 bch2_trans_put(trans); 1281 1282 bch2_btree_cache_unpin(c); 1283 1284 bch_err_fn(c, ret); 1285 return ret; 1286 } 1287