/*
 * 2.5 block I/O model
 *
 * Copyright (C) 2001 Jens Axboe <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/bug.h>

#ifdef CONFIG_BLOCK

#include <asm/io.h>

/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>

#define BIO_DEBUG

#ifdef BIO_DEBUG
#define BIO_BUG_ON	BUG_ON
#else
#define BIO_BUG_ON
#endif

#define BIO_MAX_PAGES		256
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)

/*
 * upper 16 bits of bi_rw define the io priority of this bio
 */
#define BIO_PRIO_SHIFT	(8 * sizeof(unsigned long) - IOPRIO_BITS)
#define bio_prio(bio)	((bio)->bi_rw >> BIO_PRIO_SHIFT)
#define bio_prio_valid(bio)	ioprio_valid(bio_prio(bio))

#define bio_set_prio(bio, prio)		do {			\
	WARN_ON(prio >= (1 << IOPRIO_BITS));			\
	(bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1);		\
	(bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT);	\
} while (0)

/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define __bvec_iter_bvec(bvec, iter)	(&(bvec)[(iter).bi_idx])

#define bvec_iter_page(bvec, iter)				\
	(__bvec_iter_bvec((bvec), (iter))->bv_page)

#define bvec_iter_len(bvec, iter)				\
	min((iter).bi_size,					\
	    __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)

#define bvec_iter_offset(bvec, iter)				\
	(__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)

#define bvec_iter_bvec(bvec, iter)				\
((struct bio_vec) {						\
	.bv_page	= bvec_iter_page((bvec), (iter)),	\
	.bv_len		= bvec_iter_len((bvec), (iter)),	\
	.bv_offset	= bvec_iter_offset((bvec), (iter)),	\
})

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bio_multiple_segments(bio)				\
	((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))

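/*
 * Illustrative sketch only: bio_iovec() materializes the bio_vec the
 * bio's iterator currently points at, with bv_len and bv_offset already
 * adjusted for any partially completed bvec. A hypothetical caller
 * peeking at the next chunk of data might do:
 *
 *	struct bio_vec bv = bio_iovec(bio);
 *
 *	pr_debug("next segment: %u bytes at page offset %u\n",
 *		 bv.bv_len, bv.bv_offset);
 */
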
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    !(bio->bi_rw & REQ_DISCARD))
		return true;

	return false;
}

static inline bool bio_is_rw(struct bio *bio)
{
	if (!bio_has_data(bio))
		return false;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
		return false;

	return true;
}

static inline bool bio_mergeable(struct bio *bio)
{
	if (bio->bi_rw & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline unsigned int bio_cur_bytes(struct bio *bio)
{
	if (bio_has_data(bio))
		return bio_iovec(bio).bv_len;
	else /* dataless requests such as discard */
		return bio->bi_iter.bi_size;
}

static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}

/*
 * will die
 */
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)

/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * a permanent PIO fallback, the user is probably better off disabling
 * highmem I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, iter)				\
	(kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +	\
	 bio_iter_iovec((bio), (iter)).bv_offset)

#define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)

/*
 * merge helpers etc
 */

/* Default implementation of BIOVEC_PHYS_MERGEABLE */
#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)			\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))

/*
 * allow arch override, e.g. for virtualized architectures (put in asm/io.h)
 */
#ifndef BIOVEC_PHYS_MERGEABLE
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)			\
	__BIOVEC_PHYS_MERGEABLE(vec1, vec2)
#endif

#define __BIO_SEG_BOUNDARY(addr1, addr2, mask)			\
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2)				\
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))

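/*
 * Illustrative sketch only: a driver deciding whether two adjacent
 * bvecs may be treated as one segment would typically require both
 * physical contiguity and that the combined range stays within the
 * queue's segment boundary mask (q, bv1 and bv2 assumed in scope):
 *
 *	if (BIOVEC_PHYS_MERGEABLE(&bv1, &bv2) &&
 *	    BIOVEC_SEG_BOUNDARY(q, &bv1, &bv2))
 *		nr_segs--;
 *
 * i.e. bv2 continues the segment that bv1 ends.
 */
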
/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
{
	return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
}

#define bio_io_error(bio) bio_endio((bio), -EIO)

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, i)				\
	for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)

static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
				     unsigned bytes)
{
	WARN_ONCE(bytes > iter->bi_size,
		  "Attempted to advance past end of bvec iter\n");

	while (bytes) {
		unsigned len = min(bytes, bvec_iter_len(bv, *iter));

		bytes -= len;
		iter->bi_size -= len;
		iter->bi_bvec_done += len;

		if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
			iter->bi_bvec_done = 0;
			iter->bi_idx++;
		}
	}
}

#define for_each_bvec(bvl, bio_vec, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);		\
	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))

static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
				    unsigned bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
}

#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)

static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same, because they interpret bi_size
	 * differently:
	 */

	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}

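/*
 * Illustrative sketch only: bio_for_each_segment() walks the remaining
 * data one segment at a time through a private iterator copy, leaving
 * the bio itself untouched. Summing the bytes covered, for example:
 *
 *	struct bio_vec bvl;
 *	struct bvec_iter iter;
 *	unsigned bytes = 0;
 *
 *	bio_for_each_segment(bvl, bio, iter)
 *		bytes += bvl.bv_len;
 *
 * For a bio that carries data, bytes ends up equal to
 * bio->bi_iter.bi_size.
 */
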
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns - and then the bio would already have been freed by the time the
 * if (bio->bi_flags ...) check runs.
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)

#if defined(CONFIG_BLK_DEV_INTEGRITY)
/*
 * bio integrity payload
 */
struct bio_integrity_payload {
	struct bio		*bip_bio;	/* parent bio */

	struct bvec_iter	bip_iter;

	/* kill - should just use bip_vec */
	void			*bip_buf;	/* generated integrity data */

	bio_end_io_t		*bip_end_io;	/* saved I/O completion fn */

	unsigned short		bip_slab;	/* slab the bip came from */
	unsigned short		bip_vcnt;	/* # of integrity bio_vecs */
	unsigned short		bip_max_vcnt;	/* integrity bio_vec slots */
	unsigned		bip_owns_buf:1;	/* should free bip_buf */

	struct work_struct	bip_work;	/* I/O completion */

	struct bio_vec		*bip_vec;
	struct bio_vec		bip_inline_vecs[0];/* embedded bvec array */
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */

extern void bio_trim(struct bio *bio, int offset, int size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);

/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}

extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);

extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);

extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);

extern struct bio_set *fs_bio_set;
unsigned int bio_integrity_tag_size(struct bio *bio);

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}

static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, NULL);
}

extern void bio_endio(struct bio *, int);
extern void bio_endio_nodec(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);

extern int submit_bio_wait(int rw, struct bio *bio);
extern void bio_advance(struct bio *, unsigned);

extern void bio_init(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

extern int bio_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
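/*
 * Illustrative sketch only: the common allocate/fill/submit pattern.
 * bdev, page and sector are assumed to be provided by the caller:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *
 *	err = submit_bio_wait(WRITE, bio);
 *	bio_put(bio);
 */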
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int, gfp_t);
struct sg_iovec;
struct rq_map_data;
extern struct bio *bio_map_user_iov(struct request_queue *,
				    struct block_device *,
				    const struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
				gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
				 gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);

#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void bio_flush_dcache_pages(struct bio *bi);
#else
static inline void bio_flush_dcache_pages(struct bio *bi)
{
}
#endif

extern void bio_copy_data(struct bio *dst, struct bio *src);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
				 unsigned long, unsigned int, int, gfp_t);
extern struct bio *bio_copy_user_iov(struct request_queue *,
				     struct rq_map_data *,
				     const struct sg_iovec *,
				     int, int, gfp_t);
extern int bio_uncopy_user(struct bio *);
void zero_fill_bio(struct bio *bio);
extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx);

#ifdef CONFIG_BLK_CGROUP
int bio_associate_current(struct bio *bio);
void bio_disassociate_task(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline int bio_associate_current(struct bio *bio) { return -ENOENT; }
static inline void bio_disassociate_task(struct bio *bio) { }
#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_HIGHMEM
/*
 * remember to never ever reenable interrupts between a bvec_kmap_irq and
 * the matching bvec_kunmap_irq!
 */
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr);
	local_irq_restore(*flags);
}

#else
static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	return page_address(bvec->bv_page) + bvec->bv_offset;
}

static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	*flags = 0;
}
#endif

static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
				   unsigned long *flags)
{
	return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_iter, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)

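/*
 * Illustrative sketch only: copying a bvec's contents out while the
 * page is temporarily mapped, with interrupts disabled for the whole
 * critical section (bv and dst are assumed to be provided):
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bv, &flags);
 *
 *	memcpy(dst, buf, bv->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */
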
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio.  The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}

#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}

static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}

static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}

static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}

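/*
 * Illustrative sketch only: a remapping driver draining bios it
 * deferred earlier, in FIFO order (deferred_bio is hypothetical):
 *
 *	struct bio_list list;
 *	struct bio *bio;
 *
 *	bio_list_init(&list);
 *	bio_list_add(&list, deferred_bio);
 *
 *	while ((bio = bio_list_pop(&list)))
 *		generic_make_request(bio);
 */
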
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2
#define BIOVEC_NR_POOLS 6
#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	mempool_t *bio_pool;
	mempool_t *bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t *bio_integrity_pool;
	mempool_t *bvec_integrity_pool;
#endif

	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;
};

struct biovec_slab {
	int nr_vecs;
	char *name;
	struct kmem_cache *slab;
};

/*
 * a small number of entries is fine, not going to be performance critical.
 * basically we just need to survive
 */
#define BIO_SPLIT_ENTRIES 2

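/*
 * Illustrative sketch only: a stacking driver would typically create a
 * private bio_set so its allocations cannot starve (or be starved by)
 * fs_bio_set, using front_pad to embed a driver-private structure in
 * front of each bio. struct my_io and nr_vecs below are hypothetical;
 * the bio must be the final member for front_pad to work:
 *
 *	struct my_io {
 *		...driver state...
 *		struct bio bio;
 *	};
 *
 *	struct bio_set *bs = bioset_create(BIO_POOL_SIZE,
 *					   offsetof(struct my_io, bio));
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	struct my_io *io = container_of(bio, struct my_io, bio);
 *
 * bioset_free(bs) tears the pool down again.
 */
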
#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define bip_for_each_vec(bvl, bip, iter)				\
	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)

#define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
		bip_for_each_vec(_bvl, _bio->bi_integrity, _iter)

#define bio_integrity(bio) (bio->bi_integrity != NULL)

extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_get_tag(struct bio *, void *, unsigned int);
extern int bio_integrity_prep(struct bio *);
extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);

#else /* CONFIG_BLK_DEV_INTEGRITY */

static inline int bio_integrity(struct bio *bio)
{
	return 0;
}

static inline int bio_integrity_enabled(struct bio *bio)
{
	return 0;
}

static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	return 0;
}

static inline void bioset_integrity_free(struct bio_set *bs)
{
}

static inline int bio_integrity_prep(struct bio *bio)
{
	return 0;
}

static inline void bio_integrity_free(struct bio *bio)
{
}

static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
				      gfp_t gfp_mask)
{
	return 0;
}

static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
{
}

static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
				      unsigned int sectors)
{
}

static inline void bio_integrity_init(void)
{
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

#endif /* CONFIG_BLOCK */
#endif /* __LINUX_BIO_H */