xref: /linux-6.15/block/bdev.c (revision 203c1ce0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"

/* Should we allow writing to mounted block devices? */
static bool bdev_allow_write_mounted = IS_ENABLED(CONFIG_BLK_DEV_WRITE_MOUNTED);

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

static inline struct inode *BD_INODE(struct block_device *bdev)
{
	return &container_of(bdev, struct bdev_inode, bdev)->vfs_inode;
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

struct block_device *file_bdev(struct file *bdev_file)
{
	return I_BDEV(bdev_file->f_mapping->host);
}
EXPORT_SYMBOL(file_bdev);
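
/*
 * Example (illustrative sketch, not part of the original file): the
 * accessors above convert between the objects describing one block device.
 * Given a struct file obtained from bdev_file_open_by_dev() or
 * bdev_file_open_by_path(), a caller can hop between representations:
 *
 *	struct file *bdev_file = ...;			// an open bdev file
 *	struct inode *inode = bdev_file->f_mapping->host;
 *	struct block_device *bdev = file_bdev(bdev_file);
 *
 *	WARN_ON(bdev != I_BDEV(inode));			// same object
 */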

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = BD_INODE(bdev);
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret)
			pr_warn_ratelimited(
	"VFS: Dirty inode writeback failed for block device %pg (err=%d).\n",
				bdev, ret);
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
}
EXPORT_SYMBOL(invalidate_bdev);
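
/*
 * Example (illustrative sketch): unlike kill_bdev(), which also drops dirty
 * pages, invalidate_bdev() only tosses clean, unused cache.  A driver that
 * noticed a media change might call it before rescanning the device; the
 * media_changed() helper here is hypothetical:
 *
 *	if (media_changed(disk))
 *		invalidate_bdev(disk->part0);
 */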

/*
 * Drop all buffers & page cache for a given bdev range. This function bails
 * out with an error if the bdev has another exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold an exclusive handle for the device, upgrade to
	 * one while we discard the buffer cache to avoid discarding buffers
	 * under a live filesystem.
	 */
	if (!(mode & BLK_OPEN_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range, NULL);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_mapping, lstart, lend);
	if (!(mode & BLK_OPEN_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(BD_INODE(bdev));

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	BD_INODE(bdev)->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct file *file, int size)
{
	struct inode *inode = file->f_mapping->host;
	struct block_device *bdev = I_BDEV(inode);

	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	if (!file->private_data)
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev_file, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
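
/*
 * Example (illustrative sketch): a filesystem's fill_super typically sets
 * its block size before reading the superblock, letting the helper bump the
 * size up to the device's logical block size when needed.  "examplefs" and
 * the on-disk block size of 1024 are assumptions for the sketch:
 *
 *	static int examplefs_fill_super(struct super_block *sb, void *data,
 *					int silent)
 *	{
 *		int bsize = sb_min_blocksize(sb, 1024);
 *
 *		if (!bsize)
 *			return -EINVAL;
 *		// ... read the superblock with sb_bread() etc. ...
 *		return 0;
 *	}
 */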

int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend)
{
	return filemap_write_and_wait_range(bdev->bd_mapping,
			lstart, lend);
}
EXPORT_SYMBOL(sync_blockdev_range);
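
/*
 * Example (illustrative sketch): sync_blockdev_range() takes inclusive byte
 * offsets, so a caller flushing a run of sectors would convert like this
 * ("sect" and "nr_sects" are assumed variables):
 *
 *	loff_t start = (loff_t)sect << SECTOR_SHIFT;
 *	loff_t end = ((loff_t)(sect + nr_sects) << SECTOR_SHIFT) - 1;
 *	int err = sync_blockdev_range(bdev, start, end);
 */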

/**
 * bdev_freeze - lock a filesystem and force it into a consistent state
 * @bdev:	blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when
 * multiple freeze requests arrive simultaneously. It counts up in
 * bdev_freeze() and down in bdev_thaw(). When it reaches 0, bdev_thaw()
 * actually unfreezes.
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_freeze(struct block_device *bdev)
{
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);

	if (atomic_inc_return(&bdev->bd_fsfreeze_count) > 1) {
		mutex_unlock(&bdev->bd_fsfreeze_mutex);
		return 0;
	}

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->freeze) {
		error = bdev->bd_holder_ops->freeze(bdev);
		lockdep_assert_not_held(&bdev->bd_holder_lock);
	} else {
		mutex_unlock(&bdev->bd_holder_lock);
		error = sync_blockdev(bdev);
	}

	if (error)
		atomic_dec(&bdev->bd_fsfreeze_count);

	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(bdev_freeze);

/**
 * bdev_thaw - unlock filesystem
 * @bdev:	blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after bdev_freeze().
 *
 * Return: On success zero is returned, negative error code on failure.
 */
int bdev_thaw(struct block_device *bdev)
{
	int error = -EINVAL, nr_freeze;

	mutex_lock(&bdev->bd_fsfreeze_mutex);

	/*
	 * If this returns < 0 it means that @bd_fsfreeze_count was
	 * already 0 and no decrement was performed.
	 */
	nr_freeze = atomic_dec_if_positive(&bdev->bd_fsfreeze_count);
	if (nr_freeze < 0)
		goto out;

	error = 0;
	if (nr_freeze > 0)
		goto out;

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->thaw) {
		error = bdev->bd_holder_ops->thaw(bdev);
		lockdep_assert_not_held(&bdev->bd_holder_lock);
	} else {
		mutex_unlock(&bdev->bd_holder_lock);
	}

	if (error)
		atomic_inc(&bdev->bd_fsfreeze_count);
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(bdev_thaw);
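
/*
 * Example (illustrative sketch): freeze/thaw calls pair up, and only the
 * final thaw actually unfreezes.  A snapshot driver might wrap its copy
 * step like this; take_snapshot() is a hypothetical helper:
 *
 *	int err = bdev_freeze(bdev);
 *	if (err)
 *		return err;
 *	err = take_snapshot(bdev);
 *	bdev_thaw(bdev);
 *	return err;
 */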

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_MUTEX(bdev_lock);
static struct kmem_cache *bdev_cachep __ro_after_init;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = alloc_inode_sb(sb, bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name		= "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb	= kill_anon_super,
};

struct super_block *blockdev_superblock __ro_after_init;
struct vfsmount *blockdev_mnt __ro_after_init;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	blockdev_mnt = kern_mount(&bd_type);
	if (IS_ERR(blockdev_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = blockdev_mnt->mnt_sb;   /* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	mutex_init(&bdev->bd_holder_lock);
	bdev->bd_partno = partno;
	bdev->bd_mapping = &inode->i_data;
	bdev->bd_queue = disk->queue;
	if (partno)
		bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio;
	else
		bdev->bd_has_submit_bio = false;
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(BD_INODE(bdev), (loff_t)sectors << SECTOR_SHIFT);
	bdev->bd_nr_sectors = sectors;
	spin_unlock(&bdev->bd_size_lock);
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	struct inode *inode = BD_INODE(bdev);
	if (bdev_stable_writes(bdev))
		mapping_set_stable_writes(bdev->bd_mapping);
	bdev->bd_dev = dev;
	inode->i_rdev = dev;
	inode->i_ino = dev;
	insert_inode_hash(inode);
}

void bdev_unhash(struct block_device *bdev)
{
	remove_inode_hash(BD_INODE(bdev));
}

void bdev_drop(struct block_device *bdev)
{
	iput(BD_INODE(bdev));
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	lockdep_assert_held(&bdev_lock);

	if (bdev->bd_holder) {
		/*
		 * The same holder can always re-claim.
		 */
		if (bdev->bd_holder == holder) {
			if (WARN_ON_ONCE(bdev->bd_holder_ops != hops))
				return false;
			return true;
		}
		return false;
	}

	/*
	 * If the whole device's holder is set to bd_may_claim, a partition on
	 * the device is claimed, but not the whole device.
	 */
	if (whole != bdev &&
	    whole->bd_holder && whole->bd_holder != bd_may_claim)
		return false;
	return true;
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 * @hops: holder ops.
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	mutex_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, holder, hops)) {
		mutex_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	mutex_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 * @hops: block device holder operations
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops)
{
	struct block_device *whole = bdev_whole(bdev);

	mutex_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, holder, hops));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	mutex_lock(&bdev->bd_holder_lock);
	bdev->bd_holder = holder;
	bdev->bd_holder_ops = hops;
	mutex_unlock(&bdev->bd_holder_lock);
	bd_clear_claiming(whole, holder);
	mutex_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	mutex_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	mutex_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
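
/*
 * Example (illustrative sketch): a caller that only needs to block other
 * exclusive openers for a while, without holding an exclusive open itself,
 * can pair bd_prepare_to_claim() with bd_abort_claiming(), just like
 * truncate_bdev_range() above does.  "my_token" is any unique pointer
 * identifying the claimer:
 *
 *	int err = bd_prepare_to_claim(bdev, my_token, NULL);
 *	if (err)
 *		return err;
 *	// ... do work that must not race with exclusive openers ...
 *	bd_abort_claiming(bdev, my_token);
 */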

static void bd_end_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);
	bool unblock = false;

	/*
	 * Release a claim on the device.  The holder fields are protected with
	 * bdev_lock.  open_mutex is used to synchronize disk_holder unlinking.
	 */
	mutex_lock(&bdev_lock);
	WARN_ON_ONCE(bdev->bd_holder != holder);
	WARN_ON_ONCE(--bdev->bd_holders < 0);
	WARN_ON_ONCE(--whole->bd_holders < 0);
	if (!bdev->bd_holders) {
		mutex_lock(&bdev->bd_holder_lock);
		bdev->bd_holder = NULL;
		bdev->bd_holder_ops = NULL;
		mutex_unlock(&bdev->bd_holder_lock);
		if (bdev->bd_write_holder)
			unblock = true;
	}
	if (!whole->bd_holders)
		whole->bd_holder = NULL;
	mutex_unlock(&bdev_lock);

	/*
	 * If this was the last claim, remove the holder link and unblock
	 * event polling if it was a write holder.
	 */
	if (unblock) {
		disk_unblock_events(bdev->bd_disk);
		bdev->bd_write_holder = false;
	}
}

static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static int blkdev_get_whole(struct block_device *bdev, blk_mode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (disk->fops->open) {
		ret = disk->fops->open(disk, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			     test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!atomic_read(&bdev->bd_openers))
		set_init_blocksize(bdev);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	atomic_inc(&bdev->bd_openers);
	return 0;
}

static void blkdev_put_whole(struct block_device *bdev)
{
	if (atomic_dec_and_test(&bdev->bd_openers))
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk);
}

static int blkdev_get_part(struct block_device *part, blk_mode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	if (!atomic_read(&part->bd_openers)) {
		disk->open_partitions++;
		set_init_blocksize(part);
	}
	atomic_inc(&part->bd_openers);
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part));
	return ret;
}

int bdev_permission(dev_t dev, blk_mode_t mode, void *holder)
{
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & BLK_OPEN_READ) ? DEVCG_ACC_READ : 0) |
			((mode & BLK_OPEN_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ret;

	/* Blocking writes requires an exclusive opener */
	if (mode & BLK_OPEN_RESTRICT_WRITES && !holder)
		return -EINVAL;

	/*
	 * We're using error pointers to indicate to ->release() when we
	 * failed to open that block device. Passing an error pointer as
	 * @holder also makes no sense.
	 */
	if (WARN_ON_ONCE(IS_ERR(holder)))
		return -EINVAL;

	return 0;
}

static void blkdev_put_part(struct block_device *part)
{
	struct block_device *whole = bdev_whole(part);

	if (atomic_dec_and_test(&part->bd_openers)) {
		blkdev_flush_mapping(part);
		whole->bd_disk->open_partitions--;
	}
	blkdev_put_whole(whole);
}

struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode && IS_ENABLED(CONFIG_BLOCK_LEGACY_AUTOLOAD)) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (inode)
			pr_warn_ratelimited(
"block device autoloading is deprecated and will be removed.\n");
	}
	if (!inode)
		return NULL;

	/* switch from the inode reference to a device model one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);
	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	put_device(&bdev->bd_device);
}

static bool bdev_writes_blocked(struct block_device *bdev)
{
	return bdev->bd_writers < 0;
}

static void bdev_block_writes(struct block_device *bdev)
{
	bdev->bd_writers--;
}

static void bdev_unblock_writes(struct block_device *bdev)
{
	bdev->bd_writers++;
}

static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
{
	if (bdev_allow_write_mounted)
		return true;
	/* Writes blocked? */
	if (mode & BLK_OPEN_WRITE && bdev_writes_blocked(bdev))
		return false;
	if (mode & BLK_OPEN_RESTRICT_WRITES && bdev->bd_writers > 0)
		return false;
	return true;
}

static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
{
	if (bdev_allow_write_mounted)
		return;

	/* Claim exclusive or shared write access. */
	if (mode & BLK_OPEN_RESTRICT_WRITES)
		bdev_block_writes(bdev);
	else if (mode & BLK_OPEN_WRITE)
		bdev->bd_writers++;
}

static inline bool bdev_unclaimed(const struct file *bdev_file)
{
	return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host);
}

static void bdev_yield_write_access(struct file *bdev_file)
{
	struct block_device *bdev;

	if (bdev_allow_write_mounted)
		return;

	if (bdev_unclaimed(bdev_file))
		return;

	bdev = file_bdev(bdev_file);

	if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED)
		bdev_unblock_writes(bdev);
	else if (bdev_file->f_mode & FMODE_WRITE)
		bdev->bd_writers--;
}

/**
 * bdev_open - open a block device
 * @bdev: block device to open
 * @mode: open mode (BLK_OPEN_*)
 * @holder: exclusive holder identifier
 * @hops: holder operations
 * @bdev_file: file for the block device
 *
 * Open the block device. If @holder is not %NULL, the block device is opened
 * with exclusive access.  Exclusive opens may nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * zero on success, -errno on failure.
 */
int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
	      const struct blk_holder_ops *hops, struct file *bdev_file)
{
	bool unblock_events = true;
	struct gendisk *disk = bdev->bd_disk;
	int ret;

	if (holder) {
		mode |= BLK_OPEN_EXCL;
		ret = bd_prepare_to_claim(bdev, holder, hops);
		if (ret)
			return ret;
	} else {
		if (WARN_ON_ONCE(mode & BLK_OPEN_EXCL))
			return -EIO;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (!try_module_get(disk->fops->owner))
		goto abort_claiming;
	ret = -EBUSY;
	if (!bdev_may_open(bdev, mode))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto put_module;
	bdev_claim_write_access(bdev, mode);
	if (holder) {
		bd_finish_claiming(bdev, holder, hops);

		/*
		 * Block event polling for write claims if requested.  Any write
		 * holder makes the write_holder state stick until all are
		 * released.  This is good enough and tracking individual
		 * writeable references is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder &&
		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);

	bdev_file->f_flags |= O_LARGEFILE;
	bdev_file->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;
	if (bdev_nowait(bdev))
		bdev_file->f_mode |= FMODE_NOWAIT;
	if (mode & BLK_OPEN_RESTRICT_WRITES)
		bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
	bdev_file->f_mapping = bdev->bd_mapping;
	bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
	bdev_file->private_data = holder;

	return 0;
put_module:
	module_put(disk->fops->owner);
abort_claiming:
	if (holder)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
	return ret;
}

/*
 * If BLK_OPEN_WRITE_IOCTL is set then this is a historical quirk
 * associated with the floppy driver, which allowed ioctls if the file
 * was opened for writing but did not allow reads or writes.
 * Make sure that this quirk is reflected in @f_flags.
 *
 * It can also happen if a block device is opened as O_RDWR | O_WRONLY.
 */
static unsigned blk_to_file_flags(blk_mode_t mode)
{
	unsigned int flags = 0;

	if ((mode & (BLK_OPEN_READ | BLK_OPEN_WRITE)) ==
	    (BLK_OPEN_READ | BLK_OPEN_WRITE))
		flags |= O_RDWR;
	else if (mode & BLK_OPEN_WRITE_IOCTL)
		flags |= O_RDWR | O_WRONLY;
	else if (mode & BLK_OPEN_WRITE)
		flags |= O_WRONLY;
	else if (mode & BLK_OPEN_READ)
		flags |= O_RDONLY; /* homeopathic, because O_RDONLY is 0 */
	else
		WARN_ON_ONCE(true);

	if (mode & BLK_OPEN_NDELAY)
		flags |= O_NDELAY;

	return flags;
}

struct file *bdev_file_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
				   const struct blk_holder_ops *hops)
{
	struct file *bdev_file;
	struct block_device *bdev;
	unsigned int flags;
	int ret;

	ret = bdev_permission(dev, mode, holder);
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);

	flags = blk_to_file_flags(mode);
	bdev_file = alloc_file_pseudo_noaccount(BD_INODE(bdev),
			blockdev_mnt, "", flags | O_LARGEFILE, &def_blk_fops);
	if (IS_ERR(bdev_file)) {
		blkdev_put_no_open(bdev);
		return bdev_file;
	}
	ihold(BD_INODE(bdev));

	ret = bdev_open(bdev, mode, holder, hops, bdev_file);
	if (ret) {
		/* We failed to open the block device. Let ->release() know. */
		bdev_file->private_data = ERR_PTR(ret);
		fput(bdev_file);
		return ERR_PTR(ret);
	}
	return bdev_file;
}
EXPORT_SYMBOL(bdev_file_open_by_dev);
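
/*
 * Example (illustrative sketch): opening by device number, e.g. from a
 * driver that already knows the dev_t.  The major/minor values are
 * assumptions for the sketch; a NULL holder means a non-exclusive open:
 *
 *	struct file *f = bdev_file_open_by_dev(MKDEV(8, 0),
 *			BLK_OPEN_READ, NULL, NULL);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	// ... submit I/O against file_bdev(f) ...
 *	bdev_fput(f);
 */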

struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
				    void *holder,
				    const struct blk_holder_ops *hops)
{
	struct file *file;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	file = bdev_file_open_by_dev(dev, mode, holder, hops);
	if (!IS_ERR(file) && (mode & BLK_OPEN_WRITE)) {
		if (bdev_read_only(file_bdev(file))) {
			fput(file);
			file = ERR_PTR(-EACCES);
		}
	}

	return file;
}
EXPORT_SYMBOL(bdev_file_open_by_path);
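
/*
 * Example (illustrative sketch): an exclusive open/close cycle by path, as
 * a filesystem or stacking driver might do.  The holder is any unique
 * pointer identifying the owner; "my_driver" is an assumed object:
 *
 *	struct file *f;
 *
 *	f = bdev_file_open_by_path("/dev/vdb",
 *			BLK_OPEN_READ | BLK_OPEN_WRITE, my_driver, NULL);
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	// ... use file_bdev(f) ...
 *	bdev_fput(f);	// yields the claim and puts the file
 */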

static inline void bd_yield_claim(struct file *bdev_file)
{
	struct block_device *bdev = file_bdev(bdev_file);
	void *holder = bdev_file->private_data;

	lockdep_assert_held(&bdev->bd_disk->open_mutex);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder)))
		return;

	if (!bdev_unclaimed(bdev_file))
		bd_end_claim(bdev, holder);
}

void bdev_release(struct file *bdev_file)
{
	struct block_device *bdev = file_bdev(bdev_file);
	void *holder = bdev_file->private_data;
	struct gendisk *disk = bdev->bd_disk;

	/* We failed to open that block device. */
	if (IS_ERR(holder))
		goto put_no_open;

	/*
	 * Sync early if it looks like we're the last one.  If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (atomic_read(&bdev->bd_openers) == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	bdev_yield_write_access(bdev_file);

	if (holder)
		bd_yield_claim(bdev_file);

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event.  This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev);
	else
		blkdev_put_whole(bdev);
	mutex_unlock(&disk->open_mutex);

	module_put(disk->fops->owner);
put_no_open:
	blkdev_put_no_open(bdev);
}

/**
 * bdev_fput - yield claim to the block device and put the file
 * @bdev_file: open block device
 *
 * Yield claim on the block device and put the file. Ensure that the
 * block device can be reclaimed before the file is closed, which is a
 * deferred operation.
 */
void bdev_fput(struct file *bdev_file)
{
	if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops))
		return;

	if (bdev_file->private_data) {
		struct block_device *bdev = file_bdev(bdev_file);
		struct gendisk *disk = bdev->bd_disk;

		mutex_lock(&disk->open_mutex);
		bdev_yield_write_access(bdev_file);
		bd_yield_claim(bdev_file);
		/*
		 * Tell release we already gave up our hold on the
		 * device and if write restrictions are available that
		 * we already gave up write access to the device.
		 */
		bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host);
		mutex_unlock(&disk->open_mutex);
	}

	fput(bdev_file);
}
EXPORT_SYMBOL(bdev_fput);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.
 * @dev: Pointer to the block device's dev_t, if found.
 *
 * Lookup the block device's dev_t at @pathname in the current
 * namespace if possible and return it in @dev.
 *
 * Context: May sleep.
 * Return: 0 if succeeded, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
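
/*
 * Example (illustrative sketch): resolving a path to a dev_t without
 * opening the device, e.g. to compare against an already-open device:
 *
 *	dev_t dev;
 *	int err = lookup_bdev("/dev/mapper/data", &dev);
 *
 *	if (!err && dev == bdev->bd_dev)
 *		...;	// same underlying device
 */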

/**
 * bdev_mark_dead - mark a block device as dead
 * @bdev: block device to operate on
 * @surprise: indicate a surprise removal
 *
 * Tell the file system that this device or media is dead.  If @surprise is set
 * to %true the device or media is already gone, if not we are preparing for an
 * orderly removal.
 *
 * This calls into the file system, which then typically syncs out all dirty
 * data and writes back inodes and then invalidates any cached data in the
 * inodes on the file system.  In addition we also invalidate the block device
 * mapping.
 */
void bdev_mark_dead(struct block_device *bdev, bool surprise)
{
	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->mark_dead)
		bdev->bd_holder_ops->mark_dead(bdev, surprise);
	else {
		mutex_unlock(&bdev->bd_holder_lock);
		sync_blockdev(bdev);
	}

	invalidate_bdev(bdev);
}
/*
 * New drivers should not use this directly.  There are some drivers however
 * that need this for historical reasons. For example, the DASD driver has
 * historically had a shutdown to offline mode that doesn't actually remove the
 * gendisk that otherwise looks a lot like a safe device removal.
 */
EXPORT_SYMBOL_GPL(bdev_mark_dead);

void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!atomic_read(&bdev->bd_openers)) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mapping so
			 * that applications can catch the writeback error using
			 * fsync(2). See filemap_fdatawait_keep_errors() for
			 * details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}

/*
 * Handle STATX_DIOALIGN for block devices.
 *
 * Note that the inode passed to this is the inode of a block device node file,
 * not the block device's internal inode.  Therefore it is *not* valid to use
 * I_BDEV() here; the block device has to be looked up by i_rdev instead.
 */
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
	struct block_device *bdev;

	bdev = blkdev_get_no_open(inode->i_rdev);
	if (!bdev)
		return;

	stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
	stat->dio_offset_align = bdev_logical_block_size(bdev);
	stat->result_mask |= STATX_DIOALIGN;

	blkdev_put_no_open(bdev);
}
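
/*
 * Example (illustrative sketch): from userspace, the values filled in above
 * surface via statx(2) and can be used to size O_DIRECT buffers:
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "/dev/vdb", 0, STATX_DIOALIGN, &stx) == 0 &&
 *	    (stx.stx_mask & STATX_DIOALIGN)) {
 *		// align memory to stx.stx_dio_mem_align and file offsets
 *		// to stx.stx_dio_offset_align for O_DIRECT I/O
 *	}
 */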

bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(BD_INODE(disk->part0));
}
EXPORT_SYMBOL_GPL(disk_live);

unsigned int block_size(struct block_device *bdev)
{
	return 1 << BD_INODE(bdev)->i_blkbits;
}
EXPORT_SYMBOL_GPL(block_size);

static int __init setup_bdev_allow_write_mounted(char *str)
{
	if (kstrtobool(str, &bdev_allow_write_mounted))
		pr_warn("Invalid option string for bdev_allow_write_mounted:"
			" '%s'\n", str);
	return 1;
}
__setup("bdev_allow_write_mounted=", setup_bdev_allow_write_mounted);
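
/*
 * Example (illustrative): the __setup() hook above means the policy can be
 * chosen on the kernel command line, overriding the Kconfig default, e.g.:
 *
 *	bdev_allow_write_mounted=0
 *
 * which makes opens that could write to a mounted block device fail.
 */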