/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <[email protected]>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}

#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)

/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)

/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}

static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_SAME ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )

static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}
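/*
 * Illustrative sketch (not part of this header's API): iterating a bio by
 * hand with a private copy of its iterator and bio_advance_iter().  The
 * helper name process_bvec() is made up; most code should simply use the
 * bio_for_each_segment()/bio_for_each_bvec() macros defined below.
 *
 *	struct bvec_iter iter = bio->bi_iter;
 *
 *	while (iter.bi_size) {
 *		struct bio_vec bv = bio_iter_iovec(bio, iter);
 *
 *		process_bvec(bv.bv_page, bv.bv_offset, bv.bv_len);
 *		bio_advance_iter(bio, &iter, bv.bv_len);
 *	}
 */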
/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @nbytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for
 * the same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
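/*
 * Illustrative sketch (not part of this header's API): walking the data in a
 * bio with the iteration macros above.  bio_for_each_segment() yields
 * single-page bio_vecs, while bio_for_each_bvec() yields the (possibly
 * multi-page) bvecs as stored in bi_io_vec.
 *
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned int bytes = 0;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		bytes += bv.bv_len;
 */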
/*
 * Get a reference to a bio, so it won't disappear. The intended use is
 * something like:
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_flags ...)
 *		do_something
 *	bio_put(bio);
 *
 * Without the bio_get(), the I/O could complete before submit_bio() returns,
 * and the bio could already be freed memory by the time the
 * "if (bio->bi_flags ...)" check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}

static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}

static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return (bio->bi_flags & (1U << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}

static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}

/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating.  NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	size_t _seg_count;
	int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	fi->folio = page_folio(bvec->bv_page);
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
	fi->_seg_count = bvec->bv_len;
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_i = i;
}

static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		fi->folio = folio_next(fi->folio);
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
	} else if (fi->_i + 1 < bio->bi_vcnt) {
		bio_first_folio(fi, bio, fi->_i + 1);
	} else {
		fi->folio = NULL;
	}
}

/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
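/*
 * Illustrative sketch (not part of this header's API): a read-completion
 * handler might walk every folio the bio touched, e.g.:
 *
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio)
 *		folio_mark_uptodate(fi.folio);
 */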
enum bip_flags {
	BIP_BLOCK_INTEGRITY	= 1 << 0, /* block layer owns integrity data */
	BIP_MAPPED_INTEGRITY	= 1 << 1, /* ref tag has been remapped */
	BIP_CTRL_NOCHECK	= 1 << 2, /* disable HBA integrity checking */
	BIP_DISK_NOCHECK	= 1 << 3, /* disable disk integrity checking */
	BIP_IP_CHECKSUM		= 1 << 4, /* IP checksum */
};

/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned short		bip_flags;	/* control flags */

	struct bvec_iter	bio_iter;	/* for rewinding parent bio */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[]; /* embedded bvec array */
};

#if defined(CONFIG_BLK_DEV_INTEGRITY)

static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
{
	if (bio->bi_opf & REQ_INTEGRITY)
		return bio->bi_integrity;

	return NULL;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip)
		return bip->bip_flags & flag;

	return false;
}

static inline sector_t bip_get_seed(struct bio_integrity_payload *bip)
{
	return bip->bip_iter.bi_sector;
}

static inline void bip_set_seed(struct bio_integrity_payload *bip,
				sector_t seed)
{
	bip->bip_iter.bi_sector = seed;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
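/*
 * Illustrative sketch (not part of this header's API): how a stacking driver
 * might carve off the first part of an oversized bio.  'max_sectors' and
 * 'my_bioset' are made-up names; bio_chain() is declared further down.
 *
 *	struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, &my_bioset);
 *
 *	bio_chain(split, bio);	(@bio now completes only after @split does)
 *	... resubmit the remainder left in @bio, then process @split ...
 */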
enum {
	BIOSET_NEED_BVECS = BIT(0),
	BIOSET_NEED_RESCUER = BIT(1),
	BIOSET_PERCPU_CACHE = BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);

struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     unsigned int opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
		unsigned short nr_vecs, unsigned int opf, struct bio_set *bs);
struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp);

extern struct bio_set fs_bio_set;

static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, unsigned int opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}

void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}
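/*
 * Illustrative sketch (not part of this header's API): a simple synchronous
 * read of one page using bio_alloc()/bio_add_page()/submit_bio_wait().
 * 'bdev', 'page' and 'sector' are made-up names supplied by the caller;
 * error handling is omitted.
 *
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	int ret;
 *
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 */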
/*
 * Calculate the number of bvec segments that should be allocated to fit the
 * data pointed to by @iter. If @iter is backed by a bvec array, that array is
 * reused instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, unsigned int opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf);
void bio_chain(struct bio *, struct bio *);

int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
			     unsigned int len, unsigned int offset);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio(struct bio *bio);

static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (!bio_flagged(bio, BIO_NO_PAGE_REF))
		__bio_release_pages(bio, mark_dirty);
}

extern const char *bio_devname(struct bio *bio, char *buffer);

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)

#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
#endif	/* CONFIG_BLK_CGROUP */

static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}
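/*
 * Illustrative sketch (not part of this header's API): a remapping driver
 * (DM/MD style) might clone an incoming bio, redirect the clone and submit
 * it.  'target_bdev', 'remapped_sector', 'my_end_io' and 'my_bioset' are
 * made-up names.
 *
 *	struct bio *clone = bio_alloc_clone(target_bdev, bio, GFP_NOIO,
 *					    &my_bioset);
 *
 *	clone->bi_iter.bi_sector = remapped_sector;
 *	clone->bi_end_io = my_end_io;
 *	clone->bi_private = bio;
 *	submit_bio(clone);
 */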
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
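/*
 * Illustrative sketch (not part of this header's API): queueing bios on an
 * on-stack bio_list and draining it in FIFO order.  'some_bio' is a made-up
 * name for a bio the caller already holds.
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, some_bio);
 *	...
 *	while ((bio = bio_list_pop(&list)))
 *		submit_bio(bio);
 */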
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern bool bio_integrity_prep(struct bio *);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline void *bio_integrity(struct bio *bio)
{
	return NULL;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
	return;
}

static inline bool bio_integrity_prep(struct bio *bio)
{
	return true;
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
	return;
}

static inline void bio_integrity_trim(struct bio *bio)
{
	return;
}

static inline void bio_integrity_init(void)
{
	return;
}

static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
	return false;
}

static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
					unsigned int nr)
{
	return ERR_PTR(-EINVAL);
}

static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int offset)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */
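/*
 * Illustrative sketch (not part of this header's API): a driver that keeps
 * per-bio private data can set up its own bio_set with a matching front_pad,
 * so bio_alloc_bioset() leaves room for it in front of each bio.
 * 'struct my_io' and 'my_bioset' are made-up names, with the struct assumed
 * to embed a struct bio as its final member.
 *
 *	static struct bio_set my_bioset;
 *
 *	ret = bioset_init(&my_bioset, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 */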
/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	if (!is_sync_kiocb(kiocb))
		bio->bi_opf |= REQ_NOWAIT;
}

struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, unsigned int opf, gfp_t gfp);

#endif /* __LINUX_BIO_H */