// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>
#include <linux/jiffies.h>

static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}
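/*
 * Illustrative note (an editor's sketch, not part of the on-disk format
 * definitions, and assuming read_time/write_time are the first two entries in
 * BCH_ALLOC_FIELDS_V2()): v2/v3 alloc keys store their fields as a packed run
 * of varints, so a field appended to the field list after a key was written
 * simply decodes as 0. A hypothetical key written with nr_fields == 2 and
 * values { read_time = 100, write_time = 200 } unpacks as
 * { .read_time = 100, .write_time = 200, .dirty_sectors = 0, ... };
 * the "v != out->_name" check above catches a decoded value too wide for the
 * in-memory field.
 */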
static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
			   struct bkey_validate_context from)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v),
			 c, alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}

int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
			   struct bkey_validate_context from)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k),
			 c, alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
			   struct bkey_validate_context from)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k),
			 c, alloc_v3_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}
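/*
 * Summary of the per-data_type invariants checked below (descriptive of the
 * switch in bch2_alloc_v4_validate(), added as an editor's note):
 *
 *   free/need_gc_gens/need_discard: no dirty, cached or stripe sectors and
 *				     no stripe reference
 *   sb/journal/btree/user/parity:   dirty_sectors or stripe_sectors nonzero
 *   cached:			     cached_sectors nonzero, everything else zero
 */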
int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
			   struct bkey_validate_context from)
{
	struct bch_alloc_v4 a;
	int ret = 0;

	bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k));

	bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k),
			 c, alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(&a) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(&a),
			 c, alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(a, a.data_type) != a.data_type,
			 c, alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.data_type, alloc_data_type(a, a.data_type));

	for (unsigned i = 0; i < 2; i++)
		bkey_fsck_err_on(a.io_time[i] > LRU_TIME_MAX,
				 c, alloc_key_io_time_bad,
				 "invalid io_time[%s]: %llu, max %llu",
				 i == READ ? "read" : "write",
				 a.io_time[i], LRU_TIME_MAX);

	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(&a) * sizeof(u64) >
		offsetof(struct bch_alloc_v4, stripe_sectors)
		? a.stripe_sectors
		: 0;

	switch (a.data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(stripe_sectors ||
				 a.dirty_sectors ||
				 a.cached_sectors ||
				 a.stripe,
				 c, alloc_key_empty_but_have_data,
				 "empty data type free but have data %u.%u.%u %u",
				 stripe_sectors,
				 a.dirty_sectors,
				 a.cached_sectors,
				 a.stripe);
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!a.dirty_sectors &&
				 !stripe_sectors,
				 c, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.cached_sectors ||
				 a.dirty_sectors ||
				 stripe_sectors ||
				 a.stripe,
				 c, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.io_time[READ] &&
				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
				 c, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;

	a->journal_seq_nonempty	= swab64(a->journal_seq_nonempty);
	a->journal_seq_empty	= swab64(a->journal_seq_empty);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->stripe_sectors	= swab32(a->stripe_sectors);
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
	struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq_nonempty %llu\n", a->journal_seq_nonempty);
	prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty);
	prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
	prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
	prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
	prt_printf(out, "stripe %u\n", a->stripe);
	prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
	prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]);
	prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]);

	if (ca)
		prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca));
	prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);

	bch2_dev_put(ca);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq_nonempty	= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}
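/*
 * Editor's note on the src < dst memset above (and in
 * __bch2_alloc_to_v4_mut() below): an alloc_v4 key written by an older
 * version may have BACKPOINTERS_START pointing into what is now the middle of
 * struct bch_alloc_v4. Moving BACKPOINTERS_START out to the current
 * BCH_ALLOC_V4_U64s and zeroing the newly covered region makes fields added
 * since the key was written read as 0.
 */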
static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
				       struct bpos pos)
{
	struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
					       BTREE_ITER_with_updates|
					       BTREE_ITER_cached|
					       BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}

__flatten
struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos,
						      enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
	int ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ERR_PTR(ret);

	ret = bch2_trans_update(trans, &iter, &a->k_i, flags);
	bch2_trans_iter_exit(trans, &iter);
	return unlikely(ret) ? ERR_PTR(ret) : a;
}

static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}
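/*
 * Worked example for alloc_gens_pos()/bucket_gens_pos_to_alloc() (an editor's
 * illustration, assuming KEY_TYPE_BUCKET_GENS_BITS == 8, i.e. 256 gens per
 * bucket_gens key): alloc position 1:4096 maps to bucket_gens key 1:16 with
 * offset 0, and 1:4097 maps to 1:16 with offset 1;
 * bucket_gens_pos_to_alloc() is the exact inverse.
 */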
int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
			      struct bkey_validate_context from)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens),
			 c, bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_prefetch, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
		int ret2 = 0;

		if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
			ret2 =  bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
			if (ret2)
				goto iter_err;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
iter_err:
		ret2;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

int bch2_alloc_read(struct bch_fs *c)
{
	down_read(&c->state_lock);

	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_dev *ca = NULL;
	int ret;

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			if (k.k->p.offset < ca->mi.first_bucket) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket));
				continue;
			}

			if (k.k->p.offset >= ca->mi.nbuckets) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_dev_put(ca);
	bch2_trans_put(trans);

	up_read(&c->state_lock);
	bch_err_fn(c, ret);
	return ret;
}

/* Free space/discard btree: */

static int __need_discard_or_freespace_err(struct btree_trans *trans,
					   struct bkey_s_c alloc_k,
					   bool set, bool discard, bool repair)
{
	struct bch_fs *c = trans->c;
	enum bch_fsck_flags flags = FSCK_CAN_IGNORE|(repair ? FSCK_CAN_FIX : 0);
	enum bch_sb_error_id err_id = discard
		? BCH_FSCK_ERR_need_discard_key_wrong
		: BCH_FSCK_ERR_freespace_key_wrong;
	enum btree_id btree = discard ? BTREE_ID_need_discard : BTREE_ID_freespace;
	struct printbuf buf = PRINTBUF;

	bch2_bkey_val_to_text(&buf, c, alloc_k);

	int ret = __bch2_fsck_err(NULL, trans, flags, err_id,
				  "bucket incorrectly %sset in %s btree\n%s",
				  set ? "" : "un",
				  bch2_btree_id_str(btree),
				  buf.buf);
	if (ret == -BCH_ERR_fsck_ignore ||
	    ret == -BCH_ERR_fsck_errors_not_fixed)
		ret = 0;

	printbuf_exit(&buf);
	return ret;
}

#define need_discard_or_freespace_err(...)		\
	fsck_err_wrap(__need_discard_or_freespace_err(__VA_ARGS__))

#define need_discard_or_freespace_err_on(cond, ...)	\
	(unlikely(cond) ? need_discard_or_freespace_err(__VA_ARGS__) : false)
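/*
 * Editor's note on the freespace/need_discard indexes maintained below: a
 * need_discard entry is keyed directly by device:bucket, while a freespace
 * entry's position comes from alloc_freespace_pos(), which folds the bucket's
 * generation bits into the high bits of the key offset (see the decode in
 * bch2_check_discard_freespace_key()). That way a freespace entry left over
 * from an older gen no longer matches and can be detected as stale.
 */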
static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	enum btree_id btree;
	struct bpos pos;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		pos = alloc_freespace_pos(alloc_k.k->p, *a);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		pos = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	struct btree_iter iter;
	struct bkey_s_c old = bch2_bkey_get_iter(trans, &iter, btree, pos, BTREE_ITER_intent);
	int ret = bkey_err(old);
	if (ret)
		return ret;

	need_discard_or_freespace_err_on(ca->mi.freespace_initialized &&
					 !old.k->type != set,
					 trans, alloc_k, set,
					 btree == BTREE_ID_need_discard, false);

	ret = bch2_btree_bit_mod_iter(trans, &iter, set);
fsck_err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_intent|
			       BTREE_ITER_with_updates);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca,
						    enum bch_data_type data_type,
						    s64 delta_buckets,
						    s64 delta_sectors,
						    s64 delta_fragmented, unsigned flags)
{
	s64 d[3] = { delta_buckets, delta_sectors, delta_fragmented };

	return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
					 d, dev_data_type,
					 .dev = ca->dev_idx,
					 .data_type = data_type);
}

int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca,
				   const struct bch_alloc_v4 *old,
				   const struct bch_alloc_v4 *new,
				   unsigned flags)
{
	s64 old_sectors = bch2_bucket_sectors(*old);
	s64 new_sectors = bch2_bucket_sectors(*new);
	if (old->data_type != new->data_type) {
		int ret =   bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
					 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
			    bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
					-1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	} else if (old_sectors != new_sectors) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
					 0,
					 new_sectors - old_sectors,
					 bch2_bucket_sectors_fragmented(ca, *new) -
					 bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	}

	s64 old_unstriped = bch2_bucket_sectors_unstriped(*old);
	s64 new_unstriped = bch2_bucket_sectors_unstriped(*new);
	if (old_unstriped != new_unstriped) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped,
					 !!new_unstriped - !!old_unstriped,
					 new_unstriped - old_unstriped,
					 0,
					 flags);
		if (ret)
			return ret;
	}

	return 0;
}
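/*
 * Overview of the alloc trigger (descriptive summary of the code below): it
 * runs in up to three modes, keyed off flags -
 *
 *   BTREE_TRIGGER_transactional: queue up the dependent updates
 *     (freespace/need_discard indexes, bucket_gens, LRUs, accounting) in the
 *     same transaction as the alloc key change
 *   BTREE_TRIGGER_atomic (at commit time): record journal sequence numbers
 *     for empty <-> nonempty transitions and kick off background work
 *     (discards, invalidates, gc_gens)
 *   BTREE_TRIGGER_gc: mirror the new gen into the in-memory gc state
 */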
int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
	if (!ca)
		return -BCH_ERR_trigger_alloc;

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

	struct bch_alloc_v4 *new_a;
	if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
		new_a = bkey_s_to_alloc_v4(new).v;
	} else {
		BUG_ON(!(flags & (BTREE_TRIGGER_gc|BTREE_TRIGGER_check_repair)));

		struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
		ret = PTR_ERR_OR_ZERO(new_ka);
		if (unlikely(ret))
			goto err;
		new_a = &new_ka->v;
	}

	if (flags & BTREE_TRIGGER_transactional) {
		alloc_data_type_set(new_a, new_a->data_type);

		int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
				     (int) data_type_is_empty(old_a->data_type);

		if (is_empty_delta < 0) {
			new_a->io_time[READ] = bch2_current_io_time(c, READ);
			new_a->io_time[WRITE]= bch2_current_io_time(c, WRITE);
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			if (new_a->oldest_gen == new_a->gen &&
			    !bch2_bucket_sectors_total(*new_a))
				new_a->oldest_gen++;
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
			alloc_data_type_set(new_a, new_a->data_type);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret =   bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
				bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
			if (ret)
				goto err;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = bch2_current_io_time(c, READ);

		ret = bch2_lru_change(trans, new.k->p.inode,
				      bucket_to_u64(new.k->p),
				      alloc_lru_idx_read(*old_a),
				      alloc_lru_idx_read(*new_a));
		if (ret)
			goto err;

		ret = bch2_lru_change(trans,
				      BCH_LRU_BUCKET_FRAGMENTATION,
				      bucket_to_u64(new.k->p),
				      alloc_lru_idx_fragmentation(*old_a, ca),
				      alloc_lru_idx_fragmentation(*new_a, ca));
		if (ret)
			goto err;

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				goto err;
		}

		if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
		    old_a->cached_sectors) {
			ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx,
							  -((s64) old_a->cached_sectors),
							  flags & BTREE_TRIGGER_gc);
			if (ret)
				goto err;
		}

		ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags);
		if (ret)
			goto err;
	}

	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
		u64 transaction_seq = trans->journal_res.seq;
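		/*
		 * Illustrative timeline for the journal_seq tracking below (an
		 * editor's note, not additional logic): a bucket that goes
		 * empty -> nonempty at journal seq 10 records
		 * journal_seq_nonempty = 10; if it then goes empty again at
		 * seq 12 before seq 10 has hit disk, we either mark the
		 * intermediate seqs noflush (so nothing referencing the bucket
		 * ever reaches disk) or record journal_seq_empty = 12 so the
		 * bucket isn't reused until that seq is flushed.
		 */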
		BUG_ON(!transaction_seq);

		if (log_fsck_err_on(transaction_seq && new_a->journal_seq_nonempty > transaction_seq,
				    trans, alloc_key_journal_seq_in_future,
				    "bucket journal seq in future (currently at %llu)\n%s",
				    journal_cur_seq(&c->journal),
				    (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf)))
			new_a->journal_seq_nonempty = transaction_seq;

		int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
				     (int) data_type_is_empty(old_a->data_type);

		/*
		 * Record journal sequence number of empty -> nonempty transition:
		 * Note that there may be multiple empty -> nonempty
		 * transitions, data in a bucket may be overwritten while we're
		 * still writing to it - so be careful to only record the first:
		 */
		if (is_empty_delta < 0 &&
		    new_a->journal_seq_empty <= c->journal.flushed_seq_ondisk) {
			new_a->journal_seq_nonempty	= transaction_seq;
			new_a->journal_seq_empty	= 0;
		}

		/*
		 * Bucket becomes empty: mark it as waiting for a journal flush,
		 * unless updates since empty -> nonempty transition were never
		 * flushed - we may need to ask the journal not to flush
		 * intermediate sequence numbers:
		 */
		if (is_empty_delta > 0) {
			if (new_a->journal_seq_nonempty == transaction_seq ||
			    bch2_journal_noflush_seq(&c->journal,
						     new_a->journal_seq_nonempty,
						     transaction_seq)) {
				new_a->journal_seq_nonempty = new_a->journal_seq_empty = 0;
			} else {
				new_a->journal_seq_empty = transaction_seq;

				ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
									   c->journal.flushed_seq_ondisk,
									   new.k->p.inode, new.k->p.offset,
									   transaction_seq);
				if (bch2_fs_fatal_err_on(ret, c,
							 "setting bucket_needs_journal_commit: %s",
							 bch2_err_str(ret)))
					goto err;
			}
		}

		if (new_a->gen != old_a->gen) {
			rcu_read_lock();
			u8 *gen = bucket_gen(ca, new.k->p.offset);
			if (unlikely(!gen)) {
				rcu_read_unlock();
				goto invalid_bucket;
			}
			*gen = new_a->gen;
			rcu_read_unlock();
		}

#define eval_state(_a, expr)	({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)	!eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)	(a->journal_seq_empty <= c->journal.flushed_seq_ondisk)

		if (statechange(a->data_type == BCH_DATA_free) &&
		    bucket_flushed(new_a))
			closure_wake_up(&c->freelist_wait);

		if (statechange(a->data_type == BCH_DATA_need_discard) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
		    bucket_flushed(new_a))
			bch2_discard_one_bucket_fast(ca, new.k->p.offset);

		if (statechange(a->data_type == BCH_DATA_cached) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_dev_do_invalidates(ca);

		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
			bch2_gc_gens_async(c);
	}

	if ((flags & BTREE_TRIGGER_gc) && (flags & BTREE_TRIGGER_insert)) {
		rcu_read_lock();
		struct bucket *g = gc_bucket(ca, new.k->p.offset);
		if (unlikely(!g)) {
			rcu_read_unlock();
			goto invalid_bucket;
		}
		g->gen_valid	= 1;
		g->gen		= new_a->gen;
		rcu_read_unlock();
	}
err:
fsck_err:
	printbuf_exit(&buf);
	bch2_dev_put(ca);
	return ret;
invalid_bucket:
	bch2_fs_inconsistent(c, "reference to invalid bucket\n%s",
			     (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
	ret = -BCH_ERR_trigger_alloc;
	goto err;
}
/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
 * extents style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_max(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}

static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
{
	if (*ca) {
		if (bucket->offset < (*ca)->mi.first_bucket)
			bucket->offset = (*ca)->mi.first_bucket;

		if (bucket->offset < (*ca)->mi.nbuckets)
			return true;

		bch2_dev_put(*ca);
		*ca = NULL;
		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	*ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (*ca) {
		*bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
		bch2_dev_get(*ca);
	}
	rcu_read_unlock();

	return *ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
							struct bch_dev **ca, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);

	if (!k.k->type) {
		struct bpos hole_start = bkey_start_pos(k.k);

		if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
			if (!next_bucket(c, ca, &hole_start))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, hole_start);
			goto again;
		}

		if (k.k->p.offset > (*ca)->mi.nbuckets)
			bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
	}

	return k;
}
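/*
 * bch2_check_alloc_key() cross-checks a single alloc key against the three
 * indexes derived from it: the need_discard btree, the freespace btree and the
 * bucket_gens btree. The iterators are passed in by the caller so all four
 * btrees can be walked in lockstep across the whole keyspace.
 */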
static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
	if (fsck_err_on(!ca,
			trans, alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		ret = bch2_btree_delete_at(trans, alloc_iter, 0);
	if (!ca)
		return ret;

	if (!ca->mi.freespace_initialized)
		goto out;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bool is_discarded = a->data_type == BCH_DATA_need_discard;
	if (need_discard_or_freespace_err_on(!!k.k->type != is_discarded,
					     trans, alloc_k, !is_discarded, true, true)) {
		ret = bch2_btree_bit_mod_iter(trans, discard_iter, is_discarded);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	bool is_free = a->data_type == BCH_DATA_free;
	if (need_discard_or_freespace_err_on(!!k.k->type != is_free,
					     trans, alloc_k, !is_free, false, true)) {
		ret = bch2_btree_bit_mod_iter(trans, freespace_iter, is_free);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
			trans, bucket_gens_key_wrong,
			"incorrect gen in bucket_gens btree (got %u should be %u)\n%s",
			alloc_gen(k, gens_offset), a->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
out:
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bch_dev *ca,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (fsck_err_on(k.k->type != KEY_TYPE_set,
			trans, freespace_hole_missing,
			"hole in alloc btree missing in freespace btree\n"
			"device %llu buckets %llu-%llu",
			freespace_iter->pos.inode,
			freespace_iter->pos.offset,
			end->offset)) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= KEY_TYPE_set;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}
static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end, &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], trans,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

struct check_discard_freespace_key_async {
	struct work_struct	work;
	struct bch_fs		*c;
	struct bbpos		pos;
};

static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct bbpos pos)
{
	struct btree_iter iter;
	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, pos.btree, pos.pos, 0);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	u8 gen;
	ret = k.k->type != KEY_TYPE_set
		? bch2_check_discard_freespace_key(trans, &iter, &gen, false)
		: 0;
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static void check_discard_freespace_key_work(struct work_struct *work)
{
	struct check_discard_freespace_key_async *w =
		container_of(work, struct check_discard_freespace_key_async, work);

	bch2_trans_do(w->c, bch2_recheck_discard_freespace_key(trans, w->pos));
	bch2_write_ref_put(w->c, BCH_WRITE_REF_check_discard_freespace_key);
	kfree(w);
}
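/*
 * Reminder on the key layout decoded below (see alloc_freespace_pos()): in the
 * freespace btree the bucket number lives in the low 56 bits of the key offset
 * and the generation bits in the high 8 bits, so e.g. a key for bucket 100
 * with genbits 3 has offset (3ULL << 56) | 100.
 */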
int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_iter *iter, u8 *gen,
				     bool async_repair)
{
	struct bch_fs *c = trans->c;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;

	struct bpos bucket = iter->pos;
	bucket.offset &= ~(~0ULL << 56);
	u64 genbits = iter->pos.offset & (~0ULL << 56);

	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter,
						     BTREE_ID_alloc, bucket,
						     async_repair ? BTREE_ITER_cached : 0);
	int ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		if (fsck_err(trans, need_discard_freespace_key_to_invalid_dev_bucket,
			     "entry in %s btree for nonexistent dev:bucket %llu:%llu",
			     bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
			goto delete;
		ret = 1;
		goto out;
	}

	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->data_type != state ||
	    (state == BCH_DATA_free &&
	     genbits != alloc_freespace_genbits(*a))) {
		if (fsck_err(trans, need_discard_freespace_key_bad,
			     "%s\nincorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			     (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			     bch2_btree_id_str(iter->btree_id),
			     iter->pos.inode,
			     iter->pos.offset,
			     a->data_type == state,
			     genbits >> 56, alloc_freespace_genbits(*a) >> 56))
			goto delete;
		ret = 1;
		goto out;
	}

	*gen = a->gen;
out:
fsck_err:
	bch2_set_btree_iter_dontneed(&alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	if (!async_repair) {
		ret =   bch2_btree_bit_mod_iter(trans, iter, false) ?:
			bch2_trans_commit(trans, NULL, NULL,
					  BCH_TRANS_COMMIT_no_enospc) ?:
			-BCH_ERR_transaction_restart_commit;
		goto out;
	} else {
		/*
		 * We can't repair here when called from the allocator path: the
		 * commit will recurse back into the allocator
		 */
		struct check_discard_freespace_key_async *w =
			kzalloc(sizeof(*w), GFP_KERNEL);
		if (!w)
			goto out;

		if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_check_discard_freespace_key)) {
			kfree(w);
			goto out;
		}

		INIT_WORK(&w->work, check_discard_freespace_key_work);
		w->c = c;
		w->pos = BBPOS(iter->btree_id, iter->pos);
		queue_work(c->write_ref_wq, &w->work);
		goto out;
	}
}

static int bch2_check_discard_freespace_key_fsck(struct btree_trans *trans, struct btree_iter *iter)
{
	u8 gen;
	int ret = bch2_check_discard_freespace_key(trans, iter, &gen, false);
	return ret < 0 ? ret : 0;
}
/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
	if (!ca) {
		if (fsck_err(trans, bucket_gens_to_invalid_dev,
			     "bucket_gens key for invalid device:\n%s",
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets,
			trans, bucket_gens_to_invalid_buckets,
			"bucket_gens key for invalid buckets:\n%s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
				trans, bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
				trans, bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto out;

		memcpy(u, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, u, 0);
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}
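/*
 * bch2_check_alloc_info() runs the alloc consistency passes in order
 * (descriptive summary): walk the alloc btree, checking each key and each
 * hole against the derived btrees; then walk the need_discard and freespace
 * btrees, checking each entry back against alloc; then validate bucket_gens
 * keys for nonexistent buckets.
 */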
int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bch_dev *ca = NULL;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_prefetch);

	while (1) {
		struct bpos next;

		bch2_trans_begin(trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret =   bch2_check_alloc_hole_freespace(trans, ca,
								bkey_start_pos(k.k),
								&next,
								&freespace_iter) ?:
				bch2_check_alloc_hole_bucket_gens(trans,
								  bkey_start_pos(k.k),
								  &next,
								  &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &bucket_gens_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	bch2_trans_iter_exit(trans, &iter);
	bch2_dev_put(ca);
	ca = NULL;

	if (ret < 0)
		goto err;

	ret = for_each_btree_key(trans, iter,
				 BTREE_ID_need_discard, POS_MIN,
				 BTREE_ITER_prefetch, k,
		bch2_check_discard_freespace_key_fsck(trans, &iter));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	while (1) {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k) ?:
			bch2_check_discard_freespace_key_fsck(trans, &iter);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
		}
		if (ret) {
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);

			bch_err(c, "while checking %s", buf.buf);
			printbuf_exit(&buf);
			break;
		}

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	ret = for_each_btree_key_commit(trans, iter,
					BTREE_ID_bucket_gens, POS_MIN,
					BTREE_ITER_prefetch, k,
					NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_check_bucket_gens_key(trans, &iter, k));
err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter,
				       struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
	if (!ca)
		return 0;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
	if (lru_idx) {
		ret = bch2_lru_check_set(trans, BCH_LRU_BUCKET_FRAGMENTATION,
					 bucket_to_u64(alloc_k.k->p),
					 lru_idx, alloc_k, last_flushed);
		if (ret)
			goto err;
	}

	if (a->data_type != BCH_DATA_cached)
		goto err;

	if (fsck_err_on(!a->io_time[READ],
			trans, alloc_key_cached_but_read_time_zero,
			"cached bucket with read_time 0\n%s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_alloc_v4 *a_mut =
			bch2_alloc_to_v4_mut(trans, alloc_k);
		ret = PTR_ERR_OR_ZERO(a_mut);
		if (ret)
			goto err;

		a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
		ret = bch2_trans_update(trans, alloc_iter,
					&a_mut->k_i, BTREE_TRIGGER_norun);
		if (ret)
			goto err;

		a = &a_mut->v;
	}

	ret = bch2_lru_check_set(trans, alloc_k.k->p.inode,
				 bucket_to_u64(alloc_k.k->p),
				 a->io_time[READ],
				 alloc_k, last_flushed);
	if (ret)
		goto err;
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	struct bkey_buf last_flushed;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed))) ?:
		bch2_check_stripe_to_lru_refs(c);

	bch2_bkey_buf_exit(&last_flushed, c);
	bch_err_fn(c, ret);
	return ret;
}

static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
	int ret;

	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			ret = -BCH_ERR_EEXIST_discard_in_flight_add;
			goto out;
		}

	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
			   .in_progress = in_progress,
			   .bucket	= bucket,
	}));
out:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
	return ret;
}

static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
{
	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			BUG_ON(!i->in_progress);
			darray_remove_item(&ca->discard_buckets_in_flight, i);
			goto found;
		}
	BUG();
found:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
}

struct discard_buckets_state {
	u64		seen;
	u64		open;
	u64		need_journal_commit;
	u64		discarded;
};

/*
 * This is needed because discard is both a filesystem option and a device
 * option, and mount options are supposed to apply to that mount and not be
 * persisted, i.e. if it's set as a mount option we can't propagate it to the
 * device.
 */
static inline bool discard_opt_enabled(struct bch_fs *c, struct bch_dev *ca)
{
	return test_bit(BCH_FS_discard_mount_opt_set, &c->flags)
		? c->opts.discard
		: ca->mi.discard;
}
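/*
 * Discarding a bucket (descriptive summary of the function below): a bucket in
 * the need_discard btree may only be discarded once it isn't open for writes
 * and once any journal entries covering its last writes have hit disk; we then
 * issue the block layer discard (if enabled) and clear NEED_DISCARD on the
 * alloc key, which moves the bucket to BCH_DATA_free - the alloc trigger drops
 * the need_discard btree entry and creates the freespace entry.
 */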
static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct bch_dev *ca,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   struct discard_buckets_state *s,
				   bool fastpath)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	bool discard_locked = false;
	int ret = 0;

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
		goto out;
	}

	u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal,
						      pos.inode, pos.offset);
	if (seq_ready > c->journal.flushed_seq_ondisk) {
		if (seq_ready > c->journal.flushing_seq)
			s->need_journal_commit++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (need_discard_or_freespace_err(trans, k, true, true, true)) {
			ret = bch2_btree_bit_mod_iter(trans, need_discard_iter, false);
			if (ret)
				goto out;
			goto commit;
		}

		goto out;
	}

	if (!fastpath) {
		if (discard_in_flight_add(ca, iter.pos.offset, true))
			goto out;

		discard_locked = true;
	}

	if (!bkey_eq(*discard_pos_done, iter.pos)) {
		s->discarded++;
		*discard_pos_done = iter.pos;

		if (discard_opt_enabled(c, ca) && !c->opts.nochanges) {
			/*
			 * This works without any other locks because this is the only
			 * thread that removes items from the need_discard tree
			 */
			bch2_trans_unlock_long(trans);
			blkdev_issue_discard(ca->disk_sb.bdev,
					     k.k->p.offset * ca->mi.bucket_size,
					     ca->mi.bucket_size,
					     GFP_KERNEL);
			ret = bch2_trans_relock_notrace(trans);
			if (ret)
				goto out;
		}
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	alloc_data_type_set(&a->v, a->v.data_type);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	if (ret)
		goto out;
commit:
	ret = bch2_trans_commit(trans, NULL, NULL,
				BCH_WATERMARK_btree|
				BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	if (!fastpath)
		count_event(c, bucket_discard);
	else
		count_event(c, bucket_discard_fast);
out:
fsck_err:
	if (discard_locked)
		discard_in_flight_remove(ca, iter.pos.offset);
	if (!ret)
		s->seen++;
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
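/*
 * Two paths feed bch2_discard_one_bucket(): the work item below walks a
 * device's entire need_discard btree, while the fast path
 * (bch2_do_discards_fast_work()) discards individual buckets queued by the
 * alloc trigger as they become discardable.
 */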
static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
	struct bch_fs *c = ca->fs;
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key_max(trans, iter,
				   BTREE_ID_need_discard,
				   POS(ca->dev_idx, 0),
				   POS(ca->dev_idx, U64_MAX), 0, k,
			bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false)));

	if (s.need_journal_commit > dev_buckets_available(ca, BCH_WATERMARK_normal))
		bch2_journal_flush_async(&c->journal, NULL);

	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
			      bch2_err_str(ret));

	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}

void bch2_dev_do_discards(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		goto put_write_ref;

	if (queue_work(c->write_ref_wq, &ca->discard_work))
		return;

	percpu_ref_put(&ca->io_ref);
put_write_ref:
	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}

void bch2_do_discards(struct bch_fs *c)
{
	for_each_member_device(c, ca)
		bch2_dev_do_discards(ca);
}

static int bch2_do_discards_fast_one(struct btree_trans *trans,
				     struct bch_dev *ca,
				     u64 bucket,
				     struct bpos *discard_pos_done,
				     struct discard_buckets_state *s)
{
	struct btree_iter need_discard_iter;
	struct bkey_s_c discard_k = bch2_bkey_get_iter(trans, &need_discard_iter,
					BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0);
	int ret = bkey_err(discard_k);
	if (ret)
		return ret;

	if (log_fsck_err_on(discard_k.k->type != KEY_TYPE_set,
			    trans, discarding_bucket_not_in_need_discard_btree,
			    "attempting to discard bucket %u:%llu not in need_discard btree",
			    ca->dev_idx, bucket))
		goto out;

	ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true);
out:
fsck_err:
	bch2_trans_iter_exit(trans, &need_discard_iter);
	return ret;
}

static void bch2_do_discards_fast_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
	struct bch_fs *c = ca->fs;
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	while (1) {
		bool got_bucket = false;
		u64 bucket;

		mutex_lock(&ca->discard_buckets_in_flight_lock);
		darray_for_each(ca->discard_buckets_in_flight, i) {
			if (i->in_progress)
				continue;

			got_bucket = true;
			bucket = i->bucket;
			i->in_progress = true;
			break;
		}
		mutex_unlock(&ca->discard_buckets_in_flight_lock);

		if (!got_bucket)
			break;

		ret = lockrestart_do(trans,
			bch2_do_discards_fast_one(trans, ca, bucket, &discard_pos_done, &s));
		bch_err_fn(c, ret);

		discard_in_flight_remove(ca, bucket);

		if (ret)
			break;
	}

	trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));

	bch2_trans_put(trans);
	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}

static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
{
	struct bch_fs *c = ca->fs;

	if (discard_in_flight_add(ca, bucket, false))
		return;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		goto put_ref;

	if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
		return;

	percpu_ref_put(&ca->io_ref);
put_ref:
	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
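/*
 * Invalidating cached data (descriptive summary of the helpers below): to
 * reuse a bucket holding only cached data, we walk its backpointers and drop
 * this device's pointer from each extent they reference; once the last
 * reference is gone the bucket's alloc key goes empty and it can be
 * reallocated.
 */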

static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
{
	struct bch_fs *c = ca->fs;

	if (discard_in_flight_add(ca, bucket, false))
		return;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		goto put_ref;

	if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
		return;

	percpu_ref_put(&ca->io_ref);
put_ref:
	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
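
/*
 * Invalidating buckets of clean cached data: we walk the backpointers
 * for the bucket and drop the matching extent pointers; once every
 * cached pointer into the bucket is gone it can be reallocated.
 */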

static int invalidate_one_bp(struct btree_trans *trans,
			     struct bch_dev *ca,
			     struct bkey_s_c_backpointer bp,
			     struct bkey_buf *last_flushed)
{
	struct btree_iter extent_iter;
	struct bkey_s_c extent_k =
		bch2_backpointer_get_key(trans, bp, &extent_iter, 0, last_flushed);
	int ret = bkey_err(extent_k);
	if (ret)
		return ret;

	struct bkey_i *n =
		bch2_bkey_make_mut(trans, &extent_iter, &extent_k,
				   BTREE_UPDATE_internal_snapshot_node);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		goto err;

	bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx);
err:
	bch2_trans_iter_exit(trans, &extent_iter);
	return ret;
}

static int invalidate_one_bucket_by_bps(struct btree_trans *trans,
					struct bch_dev *ca,
					struct bpos bucket,
					u8 gen,
					struct bkey_buf *last_flushed)
{
	struct bpos bp_start = bucket_pos_to_bp_start(ca, bucket);
	struct bpos bp_end = bucket_pos_to_bp_end(ca, bucket);

	return for_each_btree_key_max_commit(trans, iter, BTREE_ID_backpointers,
			bp_start, bp_end, 0, k,
			NULL, NULL,
			BCH_WATERMARK_btree|
			BCH_TRANS_COMMIT_no_enospc, ({
		if (k.k->type != KEY_TYPE_backpointer)
			continue;

		struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);

		/* filter out bps with gens that don't match */
		if (bp.v->bucket_gen != gen)
			continue;

		invalidate_one_bp(trans, ca, bp, last_flushed);
	}));
}

noinline_for_stack
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct bch_dev *ca,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 struct bkey_buf *last_flushed,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	struct btree_iter alloc_iter = {};
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		if (fsck_err(trans, lru_entry_to_invalid_bucket,
			     "lru key points to nonexistent device:bucket %llu:%llu",
			     bucket.inode, bucket.offset))
			return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
		goto out;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter,
						     BTREE_ID_alloc, bucket,
						     BTREE_ITER_cached);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(*a))
		goto out;

	/*
	 * Impossible since alloc_lru_idx_read() only returns nonzero if the
	 * bucket is supposed to be on the cached bucket LRU (i.e.
	 * BCH_DATA_cached)
	 *
	 * bch2_lru_validate() also disallows lru keys with lru_pos_time() == 0
	 */
	BUG_ON(a->data_type != BCH_DATA_cached);
	BUG_ON(a->dirty_sectors);

	if (!a->cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	unsigned cached_sectors = a->cached_sectors;
	u8 gen = a->gen;

	ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
fsck_err:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
}
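
/*
 * The LRU scan starts partway through the time axis (see the starting
 * pos in bch2_do_invalidates_work()), so when we run off the end of
 * this device's LRU keys we wrap back to the start - exactly once.
 */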

static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
				    struct bch_dev *ca, bool *wrapped)
{
	struct bkey_s_c k;
again:
	k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
	if (!k.k && !*wrapped) {
		bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
		*wrapped = true;
		goto again;
	}

	return k;
}

static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
	struct bch_fs *c = ca->fs;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	struct bkey_buf last_flushed;
	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (ret)
		goto err;

	s64 nr_to_invalidate =
		should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
	struct btree_iter iter;
	bool wrapped = false;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
			     lru_pos(ca->dev_idx, 0,
				     ((bch2_current_io_time(c, READ) + U32_MAX) &
				      LRU_TIME_MAX)), 0);

	while (true) {
		bch2_trans_begin(trans);

		struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
		ret = bkey_err(k);
		if (ret)
			goto restart_err;
		if (!k.k)
			break;

		ret = invalidate_one_bucket(trans, ca, &iter, k, &last_flushed, &nr_to_invalidate);
restart_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;

		bch2_btree_iter_advance(&iter);
	}
	bch2_trans_iter_exit(trans, &iter);
err:
	bch2_trans_put(trans);
	percpu_ref_put(&ca->io_ref);
	bch2_bkey_buf_exit(&last_flushed, c);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

void bch2_dev_do_invalidates(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		goto put_ref;

	if (queue_work(c->write_ref_wq, &ca->invalidate_work))
		return;

	percpu_ref_put(&ca->io_ref);
put_ref:
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

void bch2_do_invalidates(struct bch_fs *c)
{
	for_each_member_device(c, ca)
		bch2_dev_do_invalidates(ca);
}
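
/*
 * Populate the freespace btree (and need_discard/need_gc_gens) for @ca
 * from its alloc keys; called below from bch2_fs_freespace_init() for
 * any device whose superblock member flag says freespace hasn't been
 * initialized yet.
 */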

int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, bucket_end);
	struct bch_member *m;
	unsigned long last_updated = jiffies;
	int ret;

	BUG_ON(bucket_start > bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
			     BTREE_ITER_prefetch);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		if (time_after(jiffies, last_updated + HZ * 10)) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
		}

		bch2_trans_begin(trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret =   bch2_bucket_do_index(trans, ca, k, a, true) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type = KEY_TYPE_set;
			freespace->k.p = k.k->p;
			freespace->k.size = k.k->size;

			ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (ret < 0) {
		bch_err_msg(ca, ret, "initializing free space");
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_fs_freespace_init(struct bch_fs *c)
{
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		if (ret) {
			bch2_dev_put(ca);
			bch_err_fn(c, ret);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return 0;
}

/* device removal */

int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
{
	struct bpos start = POS(ca->dev_idx, 0);
	struct bpos end = POS(ca->dev_idx, U64_MAX);
	int ret;

	/*
	 * We clear the LRU and need_discard btrees first so that we don't race
	 * with bch2_do_invalidates() and bch2_do_discards()
	 */
	ret =   bch2_dev_remove_stripes(c, ca->dev_idx) ?:
		bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_dev_usage_remove(c, ca->dev_idx);
	bch_err_msg(ca, ret, "removing dev alloc info");
	return ret;
}

/* Bucket IO clocks: */
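
/*
 * io_time[READ]/io_time[WRITE] record, in the filesystem IO clock, when
 * a bucket was last read or written; the read clock is what positions
 * clean cached buckets on the LRU walked by the invalidate path above.
 */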

static int __bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
				       size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;

	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a =
		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
	int ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	u64 now = bch2_current_io_time(c, rw);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	if (bch2_trans_relock(trans))
		bch2_trans_begin(trans);

	return nested_lockrestart_do(trans, __bch2_bucket_io_time_reset(trans, dev, bucket_nr, rw));
}

/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(c, ca) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(c, ca) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->reserved = reserved_sectors;
	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}

u64 bch2_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	/* First, remove device from allocation groups: */

	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	c->rw_devs_change_count++;

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);

	c->rw_devs_change_count++;
}

void bch2_dev_allocator_background_exit(struct bch_dev *ca)
{
	darray_exit(&ca->discard_buckets_in_flight);
}

void bch2_dev_allocator_background_init(struct bch_dev *ca)
{
	mutex_init(&ca->discard_buckets_in_flight_lock);
	INIT_WORK(&ca->discard_work, bch2_do_discards_work);
	INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
	INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
}