Searched refs:dev_sectors (Results 1 – 17 of 17) sorted by relevance
249   sector_t dev_sectors;   member
695   rdev->sectors = mddev->dev_sectors;   in rs_set_rdev_sectors()
1620  if (ds < rs->md.dev_sectors) {   in _check_data_dev_sectors()
1672  dev_sectors *= rs->raid10_copies;   in rs_set_dev_and_array_sectors()
1688  mddev->dev_sectors = dev_sectors;   in rs_set_dev_and_array_sectors()
1709  rs->md.recovery_cp = dev_sectors;   in rs_setup_recovery()
1716  ? MaxSector : dev_sectors;   in rs_setup_recovery()
2901  rdev->sectors = mddev->dev_sectors;   in rs_setup_reshape()
3079  rs->dev_sectors = rs->md.dev_sectors;   in raid_ctr()
3802  rs->md.dev_sectors, data);   in raid_iterate_devices()
[all …]
62 sector_t dev_sectors; /* temp copy of member
1492  sb->size = mddev->dev_sectors / 2;   in super_90_sync()
2411  (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) {   in bind_rdev_to_array()
4395  mddev->dev_sectors == 0)   in array_state_show()
4705  if (mddev->dev_sectors == 0 ||   in size_store()
4706  mddev->dev_sectors > sectors)   in size_store()
4707  mddev->dev_sectors = sectors;   in size_store()
5216  max_sectors = mddev->dev_sectors;   in sync_completed_show()
6069  if (mddev->dev_sectors &&   in md_run()
6353  mddev->dev_sectors = 0;   in md_clean()
8210  max_sectors = mddev->dev_sectors;   in status_resync()
[all …]
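The super_90_sync() hit above stores mddev->dev_sectors / 2; a minimal sketch of that conversion, assuming (as the v0.90 superblock format does) that the size field is kept in 1 KiB units, i.e. two 512-byte sectors per unit. The helper name is made up for illustration and is not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: 512-byte sectors -> 1 KiB units (2 sectors per KiB). */
static uint32_t sectors_to_kib(uint64_t dev_sectors)
{
        return (uint32_t)(dev_sectors / 2);
}

int main(void)
{
        /* e.g. 2097152 sectors (1 GiB of data) -> 1048576 KiB */
        printf("%u KiB\n", sectors_to_kib(2097152ULL));
        return 0;
}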
3195  return mddev->dev_sectors - sector_nr;   in raid10_sync_request()
3797  sectors = conf->dev_sectors;   in raid10_size()
3827  conf->dev_sectors = size << conf->geo.chunk_shift;   in calc_sectors()
3956  calc_sectors(conf, mddev->dev_sectors);   in setup_conf()
3970  conf->prev.stride = conf->dev_sectors;   in setup_conf()
4157  mddev->dev_sectors = conf->dev_sectors;   in raid10_run()
4247  if (sectors > mddev->dev_sectors &&   in raid10_resize()
4253  mddev->dev_sectors = conf->dev_sectors;   in raid10_resize()
4279  mddev->dev_sectors = size;   in raid10_takeover_raid0()
3080  return mddev->dev_sectors;   in raid1_size()
3348  if (sectors > mddev->dev_sectors &&   in raid1_resize()
3349  mddev->recovery_cp > mddev->dev_sectors) {   in raid1_resize()
3350  mddev->recovery_cp = mddev->dev_sectors;   in raid1_resize()
3353  mddev->dev_sectors = sectors;   in raid1_resize()
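The raid1_resize() hits form a small pattern: when the array grows and the resync checkpoint already points past the old per-device size, the checkpoint is pulled back to the old end so the newly exposed region gets resynced, and only then is dev_sectors updated. A hedged, standalone sketch of that ordering, with struct mddev_sketch standing in for struct mddev:

#include <stdint.h>
#include <stdio.h>

struct mddev_sketch {              /* minimal stand-in for struct mddev */
        uint64_t dev_sectors;      /* per-device data size, in sectors */
        uint64_t recovery_cp;      /* resync checkpoint, in sectors */
};

static void resize_sketch(struct mddev_sketch *md, uint64_t sectors)
{
        /* growing past a completed resync: restart resync from the old end */
        if (sectors > md->dev_sectors && md->recovery_cp > md->dev_sectors)
                md->recovery_cp = md->dev_sectors;
        md->dev_sectors = sectors;
}

int main(void)
{
        struct mddev_sketch md = { .dev_sectors = 1000, .recovery_cp = UINT64_MAX };
        resize_sketch(&md, 2000);   /* grow: checkpoint falls back to 1000 */
        printf("dev_sectors=%llu recovery_cp=%llu\n",
               (unsigned long long)md.dev_sectors,
               (unsigned long long)md.recovery_cp);
        return 0;
}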
428 sector_t dev_sectors; /* used size of member
6301  if (WARN_ON((mddev->dev_sectors &   in reshape_request()
6424  if (last_sector >= mddev->dev_sectors)   in reshape_request()
6425  last_sector = mddev->dev_sectors - 1;   in reshape_request()
6530  sector_t rv = mddev->dev_sectors - sector_nr;   in raid5_sync_request()
7289  sectors = mddev->dev_sectors;   in raid5_size()
7996  mddev->resync_max_sectors = mddev->dev_sectors;   in raid5_run()
8338  if (sectors > mddev->dev_sectors &&   in raid5_resize()
8339  mddev->recovery_cp > mddev->dev_sectors) {   in raid5_resize()
8340  mddev->recovery_cp = mddev->dev_sectors;   in raid5_resize()
8343  mddev->dev_sectors = sectors;   in raid5_resize()
[all …]
666 rdev->sectors = mddev->dev_sectors; in raid0_takeover_raid45()
457  sboff < (doff + mddev->dev_sectors + PAGE_SIZE / SECTOR_SIZE))   in __write_sb_page()
466  if (doff + mddev->dev_sectors > sboff)   in __write_sb_page()
28 .dev_sectors = dev_size >> 9, in ublk_fault_inject_tgt_init()
28 .dev_sectors = dev_size >> 9, in ublk_null_tgt_init()
157 p.basic.dev_sectors = bytes >> 9; in ublk_loop_tgt_init()
328 p.basic.dev_sectors = bytes >> 9; in ublk_stripe_tgt_init()
316 1 << p.basic.logical_bs_shift, p.basic.dev_sectors); in ublk_ctrl_dump()
375 __u64 dev_sectors; member
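The ublk hits above all derive dev_sectors by shifting a byte count right by 9, i.e. dividing by the 512-byte sector size before filling the basic parameter block. A minimal sketch of that conversion (names are hypothetical, not the ublk sources):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT 9   /* 512-byte sectors: 1 << 9 bytes each */

/* Sketch only: convert a device size in bytes to 512-byte sectors. */
static uint64_t bytes_to_sectors(uint64_t bytes)
{
        return bytes >> SECTOR_SHIFT;   /* truncates any partial trailing sector */
}

int main(void)
{
        uint64_t dev_size = 250ULL * 1024 * 1024;   /* hypothetical 250 MiB backing file */
        printf("dev_sectors = %llu\n",
               (unsigned long long)bytes_to_sectors(dev_size));
        return 0;
}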
1401  u64 dev_sectors = qc->dev->n_sectors;   in ata_scsi_verify_xlat() local
1432  if (block >= dev_sectors)   in ata_scsi_verify_xlat()
1434  if ((block + n_block) > dev_sectors)   in ata_scsi_verify_xlat()
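The ata_scsi_verify_xlat() hits show a two-part bounds check against the device size: the starting LBA must lie inside the device, and the whole block .. block + n_block range must not run past its end. An illustrative standalone version of that check (not libata itself):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch only: reject a request whose LBA range exceeds dev_sectors.
 * Assumes block + n_block does not overflow u64, as transfer lengths
 * are small relative to the address space. */
static bool verify_range_ok(uint64_t block, uint64_t n_block, uint64_t dev_sectors)
{
        if (block >= dev_sectors)            /* starting LBA already out of range */
                return false;
        if (block + n_block > dev_sectors)   /* range runs past the last sector */
                return false;
        return true;
}

int main(void)
{
        uint64_t dev_sectors = 1000;
        printf("%d\n", verify_range_ok(990, 20, dev_sectors));   /* 0: overruns the end */
        printf("%d\n", verify_range_ok(990, 10, dev_sectors));   /* 1: exactly fits */
        return 0;
}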
252  return p->dev_sectors >> ilog2(p->chunk_sectors);   in ublk_get_nr_zones()
531  set_capacity(ub->ub_disk, p->dev_sectors);   in ublk_dev_param_basic_apply()
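The ublk_get_nr_zones() hit divides dev_sectors by a power-of-two chunk_sectors via ilog2(). A self-contained sketch of that computation, with ilog2_u32() standing in for the kernel's ilog2() helper and the power-of-two zone-size assumption spelled out:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's ilog2(): floor(log2(v)) for v > 0. */
static unsigned int ilog2_u32(uint32_t v)
{
        unsigned int r = 0;
        while (v >>= 1)
                r++;
        return r;
}

/* Sketch only: number of zones, assuming chunk_sectors (the zone size in
 * sectors) is a power of two, so the divide reduces to a right shift. */
static uint32_t nr_zones(uint64_t dev_sectors, uint32_t chunk_sectors)
{
        return (uint32_t)(dev_sectors >> ilog2_u32(chunk_sectors));
}

int main(void)
{
        /* hypothetical: 1 GiB device with 64 MiB zones -> 16 zones */
        printf("%u zones\n", nr_zones((1ULL << 30) >> 9, (uint32_t)((64ULL << 20) >> 9)));
        return 0;
}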