Lines Matching refs:devs (drivers/md/raid10.c)
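
Every hit below indexes devs[], the flexible array member that closes struct r10bio: an array of per-device slots (sized variously as raid_disks or copies entries, see 109 and 4992), each pairing the bio issued to one copy of the data with the device number and start sector that copy lives at. A minimal sketch of just the fields these lines touch, assuming the definition in drivers/md/raid10.h (all other r10bio members omitted):

    struct r10bio {
            ...
            struct r10dev {
                    struct bio      *bio;
                    union {
                            struct bio      *repl_bio;  /* resync/writes: bio for the replacement device */
                            struct md_rdev  *rdev;      /* reads: device the request was sent to */
                    };
                    sector_t        addr;               /* start sector of this copy on that device */
                    int             devnum;             /* index into conf->mirrors[] */
            } devs[];   /* one entry per slot; count fixed at allocation */
    };

Note that the bio slots do not always hold real pointers: the hits at 533, 535, 2644 and 2863 store the sentinels IO_MADE_GOOD and IO_BLOCKED (tested at 756), which put_all_bios() (264, 268) must filter out before calling bio_put(). The two idioms used to size the flexible array itself (109 and 4992) are illustrated in a note after the listing.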

109 int size = offsetof(struct r10bio, devs[conf->geo.raid_disks]); in r10bio_pool_alloc()
167 r10_bio->devs[j].bio = bio; in r10buf_pool_alloc()
174 r10_bio->devs[j].repl_bio = bio; in r10buf_pool_alloc()
181 struct bio *rbio = r10_bio->devs[j].repl_bio; in r10buf_pool_alloc()
188 bio = r10_bio->devs[j].bio; in r10buf_pool_alloc()
216 if (r10_bio->devs[j].bio) in r10buf_pool_alloc()
217 bio_uninit(r10_bio->devs[j].bio); in r10buf_pool_alloc()
218 kfree(r10_bio->devs[j].bio); in r10buf_pool_alloc()
219 if (r10_bio->devs[j].repl_bio) in r10buf_pool_alloc()
220 bio_uninit(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
221 kfree(r10_bio->devs[j].repl_bio); in r10buf_pool_alloc()
237 struct bio *bio = r10bio->devs[j].bio; in r10buf_pool_free()
246 bio = r10bio->devs[j].repl_bio; in r10buf_pool_free()
264 struct bio **bio = & r10_bio->devs[i].bio; in put_all_bios()
268 bio = &r10_bio->devs[i].repl_bio; in put_all_bios()
345 conf->mirrors[r10_bio->devs[slot].devnum].head_position = in update_head_pos()
346 r10_bio->devs[slot].addr + (r10_bio->sectors); in update_head_pos()
359 if (r10_bio->devs[slot].bio == bio) in find_bio_disk()
361 if (r10_bio->devs[slot].repl_bio == bio) { in find_bio_disk()
373 return r10_bio->devs[slot].devnum; in find_bio_disk()
385 rdev = r10_bio->devs[slot].rdev; in raid10_end_read_request()
501 r10_bio->devs[slot].bio = NULL; in raid10_end_write_request()
528 if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, in raid10_end_write_request()
533 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD; in raid10_end_write_request()
535 r10_bio->devs[slot].bio = IO_MADE_GOOD; in raid10_end_write_request()
611 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
612 r10bio->devs[slot].addr = s; in __raid10_find_phys()
629 r10bio->devs[slot].devnum = d; in __raid10_find_phys()
630 r10bio->devs[slot].addr = s; in __raid10_find_phys()
756 if (r10_bio->devs[slot].bio == IO_BLOCKED) in read_balance()
758 disk = r10_bio->devs[slot].devnum; in read_balance()
761 r10_bio->devs[slot].addr + sectors > in read_balance()
768 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset) in read_balance()
771 dev_sector = r10_bio->devs[slot].addr; in read_balance()
827 new_distance = r10_bio->devs[slot].addr; in read_balance()
829 new_distance = abs(r10_bio->devs[slot].addr - in read_balance()
1158 if (slot >= 0 && r10_bio->devs[slot].rdev) { in raid10_read_request()
1173 disk = r10_bio->devs[slot].devnum; in raid10_read_request()
1180 err_rdev = r10_bio->devs[slot].rdev; in raid10_read_request()
1224 r10_bio->devs[slot].bio = read_bio; in raid10_read_request()
1225 r10_bio->devs[slot].rdev = rdev; in raid10_read_request()
1227 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + in raid10_read_request()
1251 int devnum = r10_bio->devs[n_copy].devnum; in raid10_write_one_disk()
1259 r10_bio->devs[n_copy].repl_bio = mbio; in raid10_write_one_disk()
1261 r10_bio->devs[n_copy].bio = mbio; in raid10_write_one_disk()
1263 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + in raid10_write_one_disk()
1298 sector_t dev_sector = r10_bio->devs[i].addr; in wait_blocked_dev()
1416 int d = r10_bio->devs[i].devnum; in raid10_write_request()
1426 r10_bio->devs[i].bio = NULL; in raid10_write_request()
1427 r10_bio->devs[i].repl_bio = NULL; in raid10_write_request()
1433 sector_t dev_sector = r10_bio->devs[i].addr; in raid10_write_request()
1470 r10_bio->devs[i].bio = bio; in raid10_write_request()
1474 r10_bio->devs[i].repl_bio = bio; in raid10_write_request()
1502 if (r10_bio->devs[i].bio) in raid10_write_request()
1504 if (r10_bio->devs[i].repl_bio) in raid10_write_request()
1511 int d = r10_bio->devs[k].devnum; in raid10_write_request()
1515 if (r10_bio->devs[k].bio) { in raid10_write_request()
1517 r10_bio->devs[k].bio = NULL; in raid10_write_request()
1519 if (r10_bio->devs[k].repl_bio) { in raid10_write_request()
1521 r10_bio->devs[k].repl_bio = NULL; in raid10_write_request()
1544 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * in __make_request()
1727 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks); in raid10_handle_discard()
1756 r10_bio->devs[disk].bio = NULL; in raid10_handle_discard()
1757 r10_bio->devs[disk].repl_bio = NULL; in raid10_handle_discard()
1767 r10_bio->devs[disk].bio = bio; in raid10_handle_discard()
1771 r10_bio->devs[disk].repl_bio = bio; in raid10_handle_discard()
1813 if (r10_bio->devs[disk].bio) { in raid10_handle_discard()
1819 r10_bio->devs[disk].bio = mbio; in raid10_handle_discard()
1820 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
1827 if (r10_bio->devs[disk].repl_bio) { in raid10_handle_discard()
1833 r10_bio->devs[disk].repl_bio = rbio; in raid10_handle_discard()
1834 r10_bio->devs[disk].devnum = disk; in raid10_handle_discard()
2316 } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr, in end_sync_write()
2354 if (!r10_bio->devs[i].bio->bi_status) in sync_request_write()
2361 fbio = r10_bio->devs[i].bio; in sync_request_write()
2373 tbio = r10_bio->devs[i].bio; in sync_request_write()
2381 d = r10_bio->devs[i].devnum; in sync_request_write()
2383 if (!r10_bio->devs[i].bio->bi_status) { in sync_request_write()
2422 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; in sync_request_write()
2443 tbio = r10_bio->devs[i].repl_bio; in sync_request_write()
2446 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write in sync_request_write()
2447 && r10_bio->devs[i].bio != fbio) in sync_request_write()
2449 d = r10_bio->devs[i].devnum; in sync_request_write()
2484 struct bio *bio = r10_bio->devs[0].bio; in fix_recovery_read_error()
2488 int dr = r10_bio->devs[0].devnum; in fix_recovery_read_error()
2489 int dw = r10_bio->devs[1].devnum; in fix_recovery_read_error()
2502 addr = r10_bio->devs[0].addr + sect; in fix_recovery_read_error()
2510 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2534 addr = r10_bio->devs[1].addr + sect; in fix_recovery_read_error()
2560 struct bio *wbio = r10_bio->devs[1].bio; in recovery_request_write()
2561 struct bio *wbio2 = r10_bio->devs[1].repl_bio; in recovery_request_write()
2583 d = r10_bio->devs[1].devnum; in recovery_request_write()
2631 int d = r10_bio->devs[slot].devnum; in fix_read_error()
2644 r10_bio->devs[slot].bio = IO_BLOCKED; in fix_read_error()
2658 d = r10_bio->devs[sl].devnum; in fix_read_error()
2664 r10_bio->devs[sl].addr + sect, in fix_read_error()
2668 r10_bio->devs[sl].addr + in fix_read_error()
2687 int dn = r10_bio->devs[slot].devnum; in fix_read_error()
2692 r10_bio->devs[slot].addr in fix_read_error()
2696 r10_bio->devs[slot].bio in fix_read_error()
2708 d = r10_bio->devs[sl].devnum; in fix_read_error()
2717 r10_bio->devs[sl].addr + in fix_read_error()
2740 d = r10_bio->devs[sl].devnum; in fix_read_error()
2749 r10_bio->devs[sl].addr + in fix_read_error()
2787 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev; in narrow_write_error()
2824 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); in narrow_write_error()
2848 struct md_rdev *rdev = r10_bio->devs[slot].rdev; in handle_read_error()
2858 bio = r10_bio->devs[slot].bio; in handle_read_error()
2860 r10_bio->devs[slot].bio = NULL; in handle_read_error()
2863 r10_bio->devs[slot].bio = IO_BLOCKED; in handle_read_error()
2895 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2897 if (r10_bio->devs[m].bio == NULL || in handle_write_completed()
2898 r10_bio->devs[m].bio->bi_end_io == NULL) in handle_write_completed()
2900 if (!r10_bio->devs[m].bio->bi_status) { in handle_write_completed()
2903 r10_bio->devs[m].addr, in handle_write_completed()
2908 r10_bio->devs[m].addr, in handle_write_completed()
2913 if (r10_bio->devs[m].repl_bio == NULL || in handle_write_completed()
2914 r10_bio->devs[m].repl_bio->bi_end_io == NULL) in handle_write_completed()
2917 if (!r10_bio->devs[m].repl_bio->bi_status) { in handle_write_completed()
2920 r10_bio->devs[m].addr, in handle_write_completed()
2925 r10_bio->devs[m].addr, in handle_write_completed()
2934 int dev = r10_bio->devs[m].devnum; in handle_write_completed()
2935 struct bio *bio = r10_bio->devs[m].bio; in handle_write_completed()
2940 r10_bio->devs[m].addr, in handle_write_completed()
2949 bio = r10_bio->devs[m].repl_bio; in handle_write_completed()
2954 r10_bio->devs[m].addr, in handle_write_completed()
3084 bio = r10bio->devs[i].bio; in raid10_alloc_init_r10buf()
3088 bio = r10bio->devs[i].repl_bio; in raid10_alloc_init_r10buf()
3405 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3415 sector = r10_bio->devs[j].addr; in raid10_sync_request()
3429 bio = r10_bio->devs[0].bio; in raid10_sync_request()
3436 from_addr = r10_bio->devs[j].addr; in raid10_sync_request()
3444 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3447 to_addr = r10_bio->devs[k].addr; in raid10_sync_request()
3448 r10_bio->devs[0].devnum = d; in raid10_sync_request()
3449 r10_bio->devs[0].addr = from_addr; in raid10_sync_request()
3450 r10_bio->devs[1].devnum = i; in raid10_sync_request()
3451 r10_bio->devs[1].addr = to_addr; in raid10_sync_request()
3454 bio = r10_bio->devs[1].bio; in raid10_sync_request()
3464 r10_bio->devs[1].bio->bi_end_io = NULL; in raid10_sync_request()
3467 bio = r10_bio->devs[1].repl_bio; in raid10_sync_request()
3495 if (r10_bio->devs[k].devnum == i) in raid10_sync_request()
3501 r10_bio->devs[k].addr, in raid10_sync_request()
3507 r10_bio->devs[k].addr, in raid10_sync_request()
3535 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) { in raid10_sync_request()
3542 int d = r10_bio->devs[j].devnum; in raid10_sync_request()
3549 r10_bio->devs[0].bio->bi_opf in raid10_sync_request()
3603 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3608 if (r10_bio->devs[i].repl_bio) in raid10_sync_request()
3609 r10_bio->devs[i].repl_bio->bi_end_io = NULL; in raid10_sync_request()
3611 bio = r10_bio->devs[i].bio; in raid10_sync_request()
3617 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3648 bio = r10_bio->devs[i].repl_bio; in raid10_sync_request()
3651 sector = r10_bio->devs[i].addr; in raid10_sync_request()
3665 int d = r10_bio->devs[i].devnum; in raid10_sync_request()
3666 if (r10_bio->devs[i].bio->bi_end_io) in raid10_sync_request()
3669 if (r10_bio->devs[i].repl_bio && in raid10_sync_request()
3670 r10_bio->devs[i].repl_bio->bi_end_io) in raid10_sync_request()
4258 static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs) in raid10_takeover_raid0() argument
4268 sector_div(size, devs); in raid10_takeover_raid0()
4799 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr in reshape_request()
4804 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; in reshape_request()
4840 int d = r10_bio->devs[s/2].devnum; in reshape_request()
4844 b = r10_bio->devs[s/2].repl_bio; in reshape_request()
4847 b = r10_bio->devs[s/2].bio; in reshape_request()
4853 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + in reshape_request()
4864 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in reshape_request()
4930 int d = r10_bio->devs[s/2].devnum; in reshape_request_write()
4934 b = r10_bio->devs[s/2].repl_bio; in reshape_request_write()
4937 b = r10_bio->devs[s/2].bio; in reshape_request_write()
4992 r10b = kmalloc(struct_size(r10b, devs, conf->copies), GFP_NOIO); in handle_reshape_read_error()
4999 pages = get_resync_pages(r10_bio->devs[0].bio)->pages; in handle_reshape_read_error()
5013 int d = r10b->devs[slot].devnum; in handle_reshape_read_error()
5021 addr = r10b->devs[slot].addr + idx * PAGE_SIZE; in handle_reshape_read_error()
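
Sizing note. Two idioms appear above for allocating an r10bio with a populated devs[] array: offsetof(struct r10bio, devs[conf->geo.raid_disks]) at 109, and struct_size(r10b, devs, conf->copies) at 4992. Both reserve the fixed header plus n trailing elements; struct_size() (include/linux/overflow.h) additionally saturates rather than wrapping if the size computation overflows. A self-contained sketch with hypothetical names (hdr, hdr_alloc and its fields are illustrative, not from raid10.c):

    #include <stddef.h>
    #include <stdlib.h>

    /* Stand-in for struct r10bio: a fixed header plus trailing per-copy slots. */
    struct hdr {
            int count;
            struct {
                    void *bio;
                    long addr;
                    int devnum;
            } devs[];                       /* flexible array member */
    };

    static struct hdr *hdr_alloc(int n)
    {
            /*
             * GNU-style offsetof with a runtime index, as raid10.c uses at
             * 109: header size plus n * sizeof(devs[0]). The kernel's
             * struct_size(h, devs, n) yields the same quantity (modulo tail
             * padding) with an overflow check on the multiply.
             */
            struct hdr *h = malloc(offsetof(struct hdr, devs[n]));

            if (h)
                    h->count = n;
            return h;
    }

The offsetof() form predates struct_size(); new kernel code generally prefers struct_size() so that an attacker-influenced element count cannot wrap the allocation size.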