Lines Matching refs:bdev — identifier cross-reference for "bdev", apparently from the Linux kernel's block/bdev.c. Each hit shows the source line number, the matching code, and the enclosing function; "member", "argument" and "local" mark declaration sites.

38 	struct block_device bdev;  member
47 static inline struct inode *BD_INODE(struct block_device *bdev) in BD_INODE() argument
49 return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode; in BD_INODE()
54 return &BDEV_I(inode)->bdev; in I_BDEV()
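
The three accessors above work because struct bdev_inode co-allocates the block_device with its backing VFS inode, so conversion in either direction is constant-time pointer arithmetic via container_of(). A minimal sketch of that containment (fields abridged to the two that matter here):

	#include <linux/fs.h>
	#include <linux/blk_types.h>

	struct bdev_inode {
		struct block_device bdev;	/* the device itself */
		struct inode vfs_inode;		/* its page-cache inode */
	};

	/* inode -> containing bdev_inode; BD_INODE()/I_BDEV() invert this */
	static inline struct bdev_inode *BDEV_I(struct inode *inode)
	{
		return container_of(inode, struct bdev_inode, vfs_inode);
	}
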
64 static void bdev_write_inode(struct block_device *bdev) in bdev_write_inode() argument
66 struct inode *inode = BD_INODE(bdev); in bdev_write_inode()
76 bdev, ret); in bdev_write_inode()
83 static void kill_bdev(struct block_device *bdev) in kill_bdev() argument
85 struct address_space *mapping = bdev->bd_mapping; in kill_bdev()
95 void invalidate_bdev(struct block_device *bdev) in invalidate_bdev() argument
97 struct address_space *mapping = bdev->bd_mapping; in invalidate_bdev()
111 int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, in truncate_bdev_range() argument
120 int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL); in truncate_bdev_range()
125 truncate_inode_pages_range(bdev->bd_mapping, lstart, lend); in truncate_bdev_range()
127 bd_abort_claiming(bdev, truncate_bdev_range); in truncate_bdev_range()
135 return invalidate_inode_pages2_range(bdev->bd_mapping, in truncate_bdev_range()
140 static void set_init_blocksize(struct block_device *bdev) in set_init_blocksize() argument
142 unsigned int bsize = bdev_logical_block_size(bdev); in set_init_blocksize()
143 loff_t size = i_size_read(BD_INODE(bdev)); in set_init_blocksize()
150 BD_INODE(bdev)->i_blkbits = blksize_bits(bsize); in set_init_blocksize()
151 mapping_set_folio_min_order(BD_INODE(bdev)->i_mapping, in set_init_blocksize()
165 int bdev_validate_blocksize(struct block_device *bdev, int block_size) in bdev_validate_blocksize() argument
171 if (block_size < bdev_logical_block_size(bdev)) in bdev_validate_blocksize()
181 struct block_device *bdev = I_BDEV(inode); in set_blocksize() local
184 ret = bdev_validate_blocksize(bdev, size); in set_blocksize()
206 sync_blockdev(bdev); in set_blocksize()
207 kill_bdev(bdev); in set_blocksize()
211 kill_bdev(bdev); in set_blocksize()
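
set_blocksize() flushes and kills the old page cache (sync_blockdev() plus kill_bdev()) only after bdev_validate_blocksize() has accepted the new size, so the validator can also be used on its own as a cheap capability probe. A caller-side sketch, assuming only the two signatures shown above (the helper name check_fs_blocksize is illustrative):

	#include <linux/blkdev.h>

	/* Return 0 if fs_bsize is usable on bdev: a power of two no
	 * smaller than the device's logical block size, per the check
	 * in bdev_validate_blocksize() above.
	 */
	static int check_fs_blocksize(struct block_device *bdev, int fs_bsize)
	{
		int ret = bdev_validate_blocksize(bdev, fs_bsize);

		if (ret)
			pr_warn("%pg: block size %d not supported\n",
				bdev, fs_bsize);
		return ret;
	}
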
244 int sync_blockdev_nowait(struct block_device *bdev) in sync_blockdev_nowait() argument
246 if (!bdev) in sync_blockdev_nowait()
248 return filemap_flush(bdev->bd_mapping); in sync_blockdev_nowait()
256 int sync_blockdev(struct block_device *bdev) in sync_blockdev() argument
258 if (!bdev) in sync_blockdev()
260 return filemap_write_and_wait(bdev->bd_mapping); in sync_blockdev()
264 int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) in sync_blockdev_range() argument
266 return filemap_write_and_wait_range(bdev->bd_mapping, in sync_blockdev_range()
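
The three sync entry points differ only in strength: sync_blockdev_nowait() starts writeback without waiting (filemap_flush()), sync_blockdev() starts it and waits, and sync_blockdev_range() waits on a byte range only. A small sketch combining two of them, assuming the signatures above (the 64 KiB "super region" and the helper name are illustrative):

	#include <linux/blkdev.h>
	#include <linux/sizes.h>

	/* Persist the first 64 KiB synchronously, then kick off
	 * writeback for the rest of the device without waiting.
	 */
	static int flush_super_region(struct block_device *bdev)
	{
		int ret = sync_blockdev_range(bdev, 0, SZ_64K - 1);

		if (ret)
			return ret;
		return sync_blockdev_nowait(bdev);
	}
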
285 int bdev_freeze(struct block_device *bdev) in bdev_freeze() argument
289 mutex_lock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
291 if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) { in bdev_freeze()
292 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
296 mutex_lock(&bdev->bd_holder_lock); in bdev_freeze()
297 if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) { in bdev_freeze()
298 error = bdev->bd_holder_ops->freeze(bdev); in bdev_freeze()
299 lockdep_assert_not_held(&bdev->bd_holder_lock); in bdev_freeze()
301 mutex_unlock(&bdev->bd_holder_lock); in bdev_freeze()
302 error = sync_blockdev(bdev); in bdev_freeze()
306 atomic_dec(&bdev->bd_fsfreeze_count); in bdev_freeze()
308 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_freeze()
321 int bdev_thaw(struct block_device *bdev) in bdev_thaw() argument
325 mutex_lock(&bdev->bd_fsfreeze_mutex); in bdev_thaw()
331 nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count); in bdev_thaw()
339 mutex_lock(&bdev->bd_holder_lock); in bdev_thaw()
340 if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) { in bdev_thaw()
341 error = bdev->bd_holder_ops->thaw(bdev); in bdev_thaw()
342 lockdep_assert_not_held(&bdev->bd_holder_lock); in bdev_thaw()
344 mutex_unlock(&bdev->bd_holder_lock); in bdev_thaw()
348 atomic_inc(&bdev->bd_fsfreeze_count); in bdev_thaw()
350 mutex_unlock(&bdev->bd_fsfreeze_mutex); in bdev_thaw()
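
bdev_freeze() and bdev_thaw() nest through bd_fsfreeze_count: only the 0-to-1 transition actually freezes (preferring the holder's freeze op over the plain sync_blockdev() fallback), and only the final thaw undoes it. A usage sketch under those semantics (with_bdev_frozen is an illustrative wrapper, not an in-tree helper):

	#include <linux/blkdev.h>

	/* Run a maintenance step with the device quiesced.  Because
	 * freeze/thaw nest, this is safe even if another holder has
	 * already frozen the device.
	 */
	static int with_bdev_frozen(struct block_device *bdev,
				    int (*work)(struct block_device *))
	{
		int ret = bdev_freeze(bdev);

		if (ret)
			return ret;
		ret = work(bdev);
		bdev_thaw(bdev);	/* only the last thaw really thaws */
		return ret;
	}
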
368 memset(&ei->bdev, 0, sizeof(ei->bdev)); in bdev_alloc_inode()
370 if (security_bdev_alloc(&ei->bdev)) { in bdev_alloc_inode()
379 struct block_device *bdev = I_BDEV(inode); in bdev_free_inode() local
381 free_percpu(bdev->bd_stats); in bdev_free_inode()
382 kfree(bdev->bd_meta_info); in bdev_free_inode()
383 security_bdev_free(bdev); in bdev_free_inode()
385 if (!bdev_is_partition(bdev)) { in bdev_free_inode()
386 if (bdev->bd_disk && bdev->bd_disk->bdi) in bdev_free_inode()
387 bdi_put(bdev->bd_disk->bdi); in bdev_free_inode()
388 kfree(bdev->bd_disk); in bdev_free_inode()
391 if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR) in bdev_free_inode()
392 blk_free_ext_minor(MINOR(bdev->bd_dev)); in bdev_free_inode()
458 struct block_device *bdev; in bdev_alloc() local
469 bdev = I_BDEV(inode); in bdev_alloc()
470 mutex_init(&bdev->bd_fsfreeze_mutex); in bdev_alloc()
471 spin_lock_init(&bdev->bd_size_lock); in bdev_alloc()
472 mutex_init(&bdev->bd_holder_lock); in bdev_alloc()
473 atomic_set(&bdev->__bd_flags, partno); in bdev_alloc()
474 bdev->bd_mapping = &inode->i_data; in bdev_alloc()
475 bdev->bd_queue = disk->queue; in bdev_alloc()
477 bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO); in bdev_alloc()
478 bdev->bd_stats = alloc_percpu(struct disk_stats); in bdev_alloc()
479 if (!bdev->bd_stats) { in bdev_alloc()
483 bdev->bd_disk = disk; in bdev_alloc()
484 return bdev; in bdev_alloc()
487 void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) in bdev_set_nr_sectors() argument
489 spin_lock(&bdev->bd_size_lock); in bdev_set_nr_sectors()
490 i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT); in bdev_set_nr_sectors()
491 bdev->bd_nr_sectors = sectors; in bdev_set_nr_sectors()
492 spin_unlock(&bdev->bd_size_lock); in bdev_set_nr_sectors()
495 void bdev_add(struct block_device *bdev, dev_t dev) in bdev_add() argument
497 struct inode *inode = BD_INODE(bdev); in bdev_add()
498 if (bdev_stable_writes(bdev)) in bdev_add()
499 mapping_set_stable_writes(bdev->bd_mapping); in bdev_add()
500 bdev->bd_dev = dev; in bdev_add()
506 void bdev_unhash(struct block_device *bdev) in bdev_unhash() argument
508 remove_inode_hash(BD_INODE(bdev)); in bdev_unhash()
511 void bdev_drop(struct block_device *bdev) in bdev_drop() argument
513 iput(BD_INODE(bdev)); in bdev_drop()
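
bdev_alloc(), bdev_set_nr_sectors(), bdev_add(), bdev_unhash() and bdev_drop() form the block-core-internal life cycle (declared in block/blk.h, not exported); the disk and partition code drives them roughly in this order. A rough sketch of that sequence, error handling omitted and the helper name illustrative:

	#include "blk.h"	/* block-core internal declarations */

	/* Allocate, size, then publish under the dev_t.  Teardown is
	 * the mirror image: bdev_unhash() stops new lookups, then
	 * bdev_drop() does the final iput() on BD_INODE().
	 */
	static struct block_device *publish_bdev(struct gendisk *disk,
						 int partno, dev_t dev,
						 sector_t sectors)
	{
		struct block_device *bdev = bdev_alloc(disk, partno);

		if (!bdev)
			return NULL;
		bdev_set_nr_sectors(bdev, sectors);
		bdev_add(bdev, dev);	/* hashes the inode by dev_t */
		return bdev;
	}
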
540 static bool bd_may_claim(struct block_device *bdev, void *holder, in bd_may_claim() argument
543 struct block_device *whole = bdev_whole(bdev); in bd_may_claim()
547 if (bdev->bd_holder) { in bd_may_claim()
551 if (bdev->bd_holder == holder) { in bd_may_claim()
552 if (WARN_ON_ONCE(bdev->bd_holder_ops != hops)) in bd_may_claim()
563 if (whole != bdev && in bd_may_claim()
582 int bd_prepare_to_claim(struct block_device *bdev, void *holder, in bd_prepare_to_claim() argument
585 struct block_device *whole = bdev_whole(bdev); in bd_prepare_to_claim()
592 if (!bd_may_claim(bdev, holder, hops)) { in bd_prepare_to_claim()
634 static void bd_finish_claiming(struct block_device *bdev, void *holder, in bd_finish_claiming() argument
637 struct block_device *whole = bdev_whole(bdev); in bd_finish_claiming()
640 BUG_ON(!bd_may_claim(bdev, holder, hops)); in bd_finish_claiming()
647 bdev->bd_holders++; in bd_finish_claiming()
648 mutex_lock(&bdev->bd_holder_lock); in bd_finish_claiming()
649 bdev->bd_holder = holder; in bd_finish_claiming()
650 bdev->bd_holder_ops = hops; in bd_finish_claiming()
651 mutex_unlock(&bdev->bd_holder_lock); in bd_finish_claiming()
665 void bd_abort_claiming(struct block_device *bdev, void *holder) in bd_abort_claiming() argument
668 bd_clear_claiming(bdev_whole(bdev), holder); in bd_abort_claiming()
673 static void bd_end_claim(struct block_device *bdev, void *holder) in bd_end_claim() argument
675 struct block_device *whole = bdev_whole(bdev); in bd_end_claim()
683 WARN_ON_ONCE(bdev->bd_holder != holder); in bd_end_claim()
684 WARN_ON_ONCE(--bdev->bd_holders < 0); in bd_end_claim()
686 if (!bdev->bd_holders) { in bd_end_claim()
687 mutex_lock(&bdev->bd_holder_lock); in bd_end_claim()
688 bdev->bd_holder = NULL; in bd_end_claim()
689 bdev->bd_holder_ops = NULL; in bd_end_claim()
690 mutex_unlock(&bdev->bd_holder_lock); in bd_end_claim()
691 if (bdev_test_flag(bdev, BD_WRITE_HOLDER)) in bd_end_claim()
703 disk_unblock_events(bdev->bd_disk); in bd_end_claim()
704 bdev_clear_flag(bdev, BD_WRITE_HOLDER); in bd_end_claim()
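
Claiming is a three-step protocol against the whole device: bd_prepare_to_claim() stakes the claim (or fails with a busy error), then either bd_finish_claiming() converts it into a real holder or bd_abort_claiming() backs out; bd_end_claim() later releases a finished claim. truncate_bdev_range() above uses the prepare/abort pair for temporary exclusivity. A sketch of that same pattern (the wrapper name is illustrative):

	#include <linux/blkdev.h>

	/* Temporary exclusive access, mirroring truncate_bdev_range()
	 * above.  As in-tree callers do, the function itself serves as
	 * the opaque holder cookie; NULL holder ops suit a transient
	 * claim.
	 */
	static int with_exclusive_claim(struct block_device *bdev,
					int (*work)(struct block_device *))
	{
		int err = bd_prepare_to_claim(bdev, with_exclusive_claim, NULL);

		if (err)
			return err;
		err = work(bdev);
		bd_abort_claiming(bdev, with_exclusive_claim);
		return err;
	}
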
708 static void blkdev_flush_mapping(struct block_device *bdev) in blkdev_flush_mapping() argument
710 WARN_ON_ONCE(bdev->bd_holders); in blkdev_flush_mapping()
711 sync_blockdev(bdev); in blkdev_flush_mapping()
712 kill_bdev(bdev); in blkdev_flush_mapping()
713 bdev_write_inode(bdev); in blkdev_flush_mapping()
716 static void blkdev_put_whole(struct block_device *bdev) in blkdev_put_whole() argument
718 if (atomic_dec_and_test(&bdev->bd_openers)) in blkdev_put_whole()
719 blkdev_flush_mapping(bdev); in blkdev_put_whole()
720 if (bdev->bd_disk->fops->release) in blkdev_put_whole()
721 bdev->bd_disk->fops->release(bdev->bd_disk); in blkdev_put_whole()
724 static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode) in blkdev_get_whole() argument
726 struct gendisk *disk = bdev->bd_disk; in blkdev_get_whole()
740 if (!atomic_read(&bdev->bd_openers)) in blkdev_get_whole()
741 set_init_blocksize(bdev); in blkdev_get_whole()
742 atomic_inc(&bdev->bd_openers); in blkdev_get_whole()
750 blkdev_put_whole(bdev); in blkdev_get_whole()
820 struct block_device *bdev; in blkdev_get_no_open() local
835 bdev = &BDEV_I(inode)->bdev; in blkdev_get_no_open()
836 if (!kobject_get_unless_zero(&bdev->bd_device.kobj)) in blkdev_get_no_open()
837 bdev = NULL; in blkdev_get_no_open()
839 return bdev; in blkdev_get_no_open()
842 void blkdev_put_no_open(struct block_device *bdev) in blkdev_put_no_open() argument
844 put_device(&bdev->bd_device); in blkdev_put_no_open()
847 static bool bdev_writes_blocked(struct block_device *bdev) in bdev_writes_blocked() argument
849 return bdev->bd_writers < 0; in bdev_writes_blocked()
852 static void bdev_block_writes(struct block_device *bdev) in bdev_block_writes() argument
854 bdev->bd_writers--; in bdev_block_writes()
857 static void bdev_unblock_writes(struct block_device *bdev) in bdev_unblock_writes() argument
859 bdev->bd_writers++; in bdev_unblock_writes()
862 static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode) in bdev_may_open() argument
867 if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev)) in bdev_may_open()
869 if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0) in bdev_may_open()
874 static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode) in bdev_claim_write_access() argument
881 bdev_block_writes(bdev); in bdev_claim_write_access()
883 bdev->bd_writers++; in bdev_claim_write_access()
893 struct block_device *bdev; in bdev_yield_write_access() local
901 bdev = file_bdev(bdev_file); in bdev_yield_write_access()
904 bdev_unblock_writes(bdev); in bdev_yield_write_access()
906 bdev->bd_writers--; in bdev_yield_write_access()
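
bd_writers packs two modes into one counter: a positive value counts plain writable opens, a negative value means a BLK_OPEN_RESTRICT_WRITES holder has the device and ordinary writers must be refused (bdev_writes_blocked() is simply bd_writers < 0). bdev_may_open() enforces both exclusions; the helpers above just move the counter in the matching direction. A standalone restatement of that admission rule (may_open_for is an illustrative name):

	#include <linux/blkdev.h>

	/* The rule implied by bdev_may_open() above:
	 *   bd_writers > 0 -> plain writers present, restricted open fails
	 *   bd_writers < 0 -> restricting holder present, writes fail
	 */
	static bool may_open_for(int bd_writers, blk_mode_t mode)
	{
		if ((mode & BLK_OPEN_WRITE) && bd_writers < 0)
			return false;
		if ((mode & BLK_OPEN_RESTRICT_WRITES) && bd_writers > 0)
			return false;
		return true;
	}
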
926 int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, in bdev_open() argument
930 struct gendisk *disk = bdev->bd_disk; in bdev_open()
935 ret = bd_prepare_to_claim(bdev, holder, hops); in bdev_open()
952 if (!bdev_may_open(bdev, mode)) in bdev_open()
954 if (bdev_is_partition(bdev)) in bdev_open()
955 ret = blkdev_get_part(bdev, mode); in bdev_open()
957 ret = blkdev_get_whole(bdev, mode); in bdev_open()
960 bdev_claim_write_access(bdev, mode); in bdev_open()
962 bd_finish_claiming(bdev, holder, hops); in bdev_open()
972 !bdev_test_flag(bdev, BD_WRITE_HOLDER) && in bdev_open()
974 bdev_set_flag(bdev, BD_WRITE_HOLDER); in bdev_open()
985 if (bdev_nowait(bdev)) in bdev_open()
989 bdev_file->f_mapping = bdev->bd_mapping; in bdev_open()
998 bd_abort_claiming(bdev, holder); in bdev_open()
1038 struct block_device *bdev; in bdev_file_open_by_dev() local
1046 bdev = blkdev_get_no_open(dev, true); in bdev_file_open_by_dev()
1047 if (!bdev) in bdev_file_open_by_dev()
1051 bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev), in bdev_file_open_by_dev()
1054 blkdev_put_no_open(bdev); in bdev_file_open_by_dev()
1057 ihold(BD_INODE(bdev)); in bdev_file_open_by_dev()
1059 ret = bdev_open(bdev, mode, holder, hops, bdev_file); in bdev_file_open_by_dev()
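
bdev_file_open_by_dev() stitches the pieces above together: blkdev_get_no_open() resolves the dev_t, alloc_file_pseudo_noaccount() wraps BD_INODE() in a struct file, and bdev_open() performs the claim and mode checks. The result is released with plain fput(), which ends in bdev_release(). A consumer sketch (non-exclusive read, so holder and ops are NULL; the helper name is illustrative):

	#include <linux/blkdev.h>
	#include <linux/file.h>
	#include <linux/err.h>

	/* Open by dev_t, read the capacity, close.  The open call
	 * returns an ERR_PTR on failure.
	 */
	static int probe_bdev_capacity(dev_t dev, sector_t *sectors)
	{
		struct file *bdev_file =
			bdev_file_open_by_dev(dev, BLK_OPEN_READ, NULL, NULL);

		if (IS_ERR(bdev_file))
			return PTR_ERR(bdev_file);
		*sectors = bdev_nr_sectors(file_bdev(bdev_file));
		fput(bdev_file);
		return 0;
	}
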
1096 struct block_device *bdev = file_bdev(bdev_file); in bd_yield_claim() local
1099 lockdep_assert_held(&bdev->bd_disk->open_mutex); in bd_yield_claim()
1105 bd_end_claim(bdev, holder); in bd_yield_claim()
1110 struct block_device *bdev = file_bdev(bdev_file); in bdev_release() local
1112 struct gendisk *disk = bdev->bd_disk; in bdev_release()
1125 if (atomic_read(&bdev->bd_openers) == 1) in bdev_release()
1126 sync_blockdev(bdev); in bdev_release()
1141 if (bdev_is_partition(bdev)) in bdev_release()
1142 blkdev_put_part(bdev); in bdev_release()
1144 blkdev_put_whole(bdev); in bdev_release()
1149 blkdev_put_no_open(bdev); in bdev_release()
1166 struct block_device *bdev = file_bdev(bdev_file); in bdev_fput() local
1167 struct gendisk *disk = bdev->bd_disk; in bdev_fput()
1238 void bdev_mark_dead(struct block_device *bdev, bool surprise) in bdev_mark_dead() argument
1240 mutex_lock(&bdev->bd_holder_lock); in bdev_mark_dead()
1241 if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead) in bdev_mark_dead()
1242 bdev->bd_holder_ops->mark_dead(bdev, surprise); in bdev_mark_dead()
1244 mutex_unlock(&bdev->bd_holder_lock); in bdev_mark_dead()
1245 sync_blockdev(bdev); in bdev_mark_dead()
1248 invalidate_bdev(bdev); in bdev_mark_dead()
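
bdev_mark_dead() notifies the holder, if it registered a mark_dead op, before syncing and invalidating the page cache, so upper layers can shut down cleanly on device removal. A sketch of a holder supplying such a callback, with my_mark_dead/my_holder_ops as illustrative names and my_client_shutdown() a hypothetical teardown hook:

	#include <linux/blkdev.h>

	static void my_mark_dead(struct block_device *bdev, bool surprise)
	{
		pr_warn("%pg gone%s\n", bdev,
			surprise ? " (surprise removal)" : "");
		/* my_client_shutdown(bdev);  -- hypothetical teardown */
	}

	/* Passed as hops to bd_prepare_to_claim() or an exclusive open */
	static const struct blk_holder_ops my_holder_ops = {
		.mark_dead	= my_mark_dead,
	};
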
1265 struct block_device *bdev; in sync_bdevs() local
1286 bdev = I_BDEV(inode); in sync_bdevs()
1288 mutex_lock(&bdev->bd_disk->open_mutex); in sync_bdevs()
1289 if (!atomic_read(&bdev->bd_openers)) { in sync_bdevs()
1302 mutex_unlock(&bdev->bd_disk->open_mutex); in sync_bdevs()
1315 struct block_device *bdev; in bdev_statx() local
1323 bdev = blkdev_get_no_open(d_backing_inode(path->dentry)->i_rdev, false); in bdev_statx()
1324 if (!bdev) in bdev_statx()
1328 stat->dio_mem_align = bdev_dma_alignment(bdev) + 1; in bdev_statx()
1329 stat->dio_offset_align = bdev_logical_block_size(bdev); in bdev_statx()
1333 if (request_mask & STATX_WRITE_ATOMIC && bdev_can_atomic_write(bdev)) { in bdev_statx()
1334 struct request_queue *bd_queue = bdev->bd_queue; in bdev_statx()
1341 stat->blksize = bdev_io_min(bdev); in bdev_statx()
1343 blkdev_put_no_open(bdev); in bdev_statx()
1352 unsigned int block_size(struct block_device *bdev) in block_size() argument
1354 return 1 << BD_INODE(bdev)->i_blkbits; in block_size()
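
block_size() simply re-derives the soft block size from i_blkbits, inverting the blksize_bits() conversion that set_blocksize() and set_init_blocksize() store (see the i_blkbits assignment above): blksize_bits(4096) is 12, and 1 << 12 is 4096. A one-function sanity sketch of that round trip (the helper name is illustrative):

	#include <linux/blkdev.h>

	/* Holds for every valid block size, i.e. any power of two. */
	static bool blocksize_roundtrip_ok(unsigned int bsize)
	{
		return (1U << blksize_bits(bsize)) == bsize;
	}
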