Cross-reference of bio_data_dir() call sites in linux-6.15:

/linux-6.15/drivers/md/

dm-crypt.c
     569   if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {   in crypt_iv_lmk_gen()
     587   if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)   in crypt_iv_lmk_post()
     693   if (bio_data_dir(dmreq->ctx->bio_in) != WRITE) {   in crypt_iv_tcw_gen()
     715   if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)   in crypt_iv_tcw_post()
    1047   if (bio_data_dir(dmreq->ctx->bio_in) != WRITE)   in crypt_iv_elephant_post()
    1372   if (bio_data_dir(ctx->bio_in) == WRITE) {   in crypt_convert_block_aead()
    1469   if (bio_data_dir(ctx->bio_in) == WRITE)   in crypt_convert_block_skcipher()
    1850   unsigned int rw = bio_data_dir(clone);   in crypt_endio()
    2264   if (bio_data_dir(io->base_bio) == READ) {   in kcryptd_async_done()
    2281   if (bio_data_dir(io->base_bio) == READ)   in kcryptd_crypt()
    [all …]

dm-flakey.c
     359   (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,   in corrupt_bio_common()
     502   if (bio_data_dir(bio) == READ) {   in flakey_map()
     562   if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {   in flakey_end_io()

dm-cache-target.c
     529   return bio_data_dir(bio) == WRITE ?   in lock_level()
     766   if (bio_data_dir(bio) == WRITE)   in remap_to_origin_clear_discard()
     775   if (bio_data_dir(bio) == WRITE) {   in remap_to_cache_dirty()
     843   if (bio_data_dir(origin_bio) == WRITE)   in remap_to_origin_and_cache()
    1066   return (bio_data_dir(bio) == WRITE) &&   in bio_writes_complete_block()
    1606   atomic_inc(bio_data_dir(bio) == READ ?   in inc_hit_counter()
    1612   atomic_inc(bio_data_dir(bio) == READ ?   in inc_miss_counter()
    1639   data_dir = bio_data_dir(bio);   in map_bio()
    1700   if (bio_data_dir(bio) == WRITE) {   in map_bio()
    1707   if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) &&   in map_bio()

dm-snap.c
    1972   if (bio_data_dir(bio) == WRITE) {   in snapshot_map()
    1981   bio_data_dir(bio) == WRITE)) {   in snapshot_map()
    2031   if (bio_data_dir(bio) == WRITE) {   in snapshot_map()
    2151   if (bio_data_dir(bio) == WRITE &&   in snapshot_merge_map()
    2163   if (bio_data_dir(bio) == WRITE)   in snapshot_merge_map()
    2171   if (bio_data_dir(bio) == WRITE) {   in snapshot_merge_map()
    2677   if (bio_data_dir(bio) != WRITE)   in origin_map()

dm-raid1.c
     533   queue_bio(m->ms, bio, bio_data_dir(bio));   in read_callback()
    1198   int r, rw = bio_data_dir(bio);   in mirror_map()
    1248   int rw = bio_data_dir(bio);   in mirror_end_io()

dm-log-writes.c
     666   if (bio_data_dir(bio) == READ)   in log_writes_map()
     772   if (bio_data_dir(bio) == WRITE && pb->block) {   in normal_end_io()

dm-delay.c
     357   if (bio_data_dir(bio) == WRITE) {   in delay_map()

dm-io.c
     148   if (bio->bi_status && bio_data_dir(bio) == READ)   in endio()

dm-dust.c
     232   if (bio_data_dir(bio) == READ)   in dust_map()

dm-thin.c
    1244   return (bio_data_dir(bio) == WRITE) &&   in io_overwrites_block()
    1836   if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||   in __remap_and_issue_shared_cell()
    1889   if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {   in process_shared_bio()
    1925   if (bio_data_dir(bio) == READ) {   in provision_block()
    1979   if (bio_data_dir(bio) == READ && tc->origin_dev) {   in process_cell()
    2030   int rw = bio_data_dir(bio);   in __process_bio_read_only()

dm-writecache.c
    1251   int rw = bio_data_dir(bio);   in bio_copy_block()
    1571   if (bio_data_dir(bio) == READ)   in writecache_map()
    1591   atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);   in writecache_map()
    1621   int dir = bio_data_dir(bio);   in writecache_end_io()

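Several of the hits above (dm-writecache's bio_in_progress[], dm-cache's hit/miss counters, dm-raid1's queue_bio() call) rely on bio_data_dir() evaluating to READ (0) or WRITE (1), so the result can index a two-element, per-direction array directly. A minimal sketch of that idiom follows; demo_stats, demo_account_start() and demo_account_end() are hypothetical names, not taken from any of the listed files.

#include <linux/atomic.h>
#include <linux/bio.h>

/* Hypothetical per-device I/O statistics, one slot per direction. */
struct demo_stats {
	atomic_t inflight[2];		/* indexed by READ (0) / WRITE (1) */
};

static void demo_account_start(struct demo_stats *stats, struct bio *bio)
{
	/* bio_data_dir() is 0 or 1, so it can be used as the array index. */
	atomic_inc(&stats->inflight[bio_data_dir(bio)]);
}

static void demo_account_end(struct demo_stats *stats, struct bio *bio)
{
	atomic_dec(&stats->inflight[bio_data_dir(bio)]);
}
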
/linux-6.15/block/

blk-throttle.h
     161   int rw = bio_data_dir(bio);   in blk_should_throtl()

blk-throttle.c
     681   bool rw = bio_data_dir(bio);   in tg_within_iops_limit()
     708   bool rw = bio_data_dir(bio);   in tg_within_bps_limit()
     752   bool rw = bio_data_dir(bio);   in tg_may_dispatch()
     811   bool rw = bio_data_dir(bio);   in throtl_charge_bio()
     834   bool rw = bio_data_dir(bio);   in throtl_add_bio_tg()
    1623   bool rw = bio_data_dir(bio);   in __blk_throtl_bio()

bio-integrity.c
     104   if (bio_data_dir(bio) == READ)   in bio_integrity_unmap_user()
     278   if (bio_data_dir(bio) == READ)   in bio_integrity_map_user()

bio-integrity-auto.c
     184   if (bio_data_dir(bio) == WRITE && bip_should_check(&bid->bip))   in bio_integrity_prep()

bounce.c
     204   int rw = bio_data_dir(bio_orig);   in __blk_queue_bounce()

blk-map.c
     122   else if (bio_data_dir(bio) == READ)   in bio_uncopy_user()
     675   bio_release_pages(bio, bio_data_dir(bio) == READ);   in blk_rq_unmap_user()

blk-crypto-fallback.c
     509   if (bio_data_dir(bio) == WRITE)   in blk_crypto_fallback_bio_prep()

/linux-6.15/drivers/md/dm-vdo/

io-submitter.c
     209   if (bio_data_dir(bio) != bio_data_dir(vio_merge->bio))   in get_mergeable_locked()

/linux-6.15/arch/m68k/emu/

nfblock.c
      68   dir = bio_data_dir(bio);   in nfhd_submit_bio()

/linux-6.15/drivers/md/bcache/

io.c
     142   int is_read = (bio_data_dir(bio) == READ ? 1 : 0);   in bch_bbio_count_io_errors()

/linux-6.15/drivers/block/drbd/

drbd_req.c
      33   req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)   in drbd_req_new()
     819   if (bio_data_dir(req->master_bio) == WRITE)   in __req_mod()
    1204   const int rw = bio_data_dir(bio);   in drbd_request_prepare()
    1321   const int rw = bio_data_dir(req->master_bio);   in drbd_send_and_submit()
    1449   const int rw = bio_data_dir(req->master_bio);   in submit_fast_path()

/linux-6.15/arch/xtensa/platforms/iss/

simdisk.c
     116   bio_data_dir(bio) == WRITE);   in simdisk_submit_bio()

/linux-6.15/drivers/s390/block/

dcssblk.c
     887   if (bio_data_dir(bio) == WRITE) {   in dcssblk_submit_bio()
     903   if (bio_data_dir(bio) == READ)   in dcssblk_submit_bio()

/linux-6.15/include/linux/

bio.h
      46   #define bio_data_dir(bio) \   (macro definition)

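The bio.h hit above is the macro definition itself. In recent kernels bio_data_dir() expands to roughly op_is_write(bio_op(bio)) ? WRITE : READ, i.e. it collapses the REQ_OP_* value encoded in bio->bi_opf down to the two legacy direction constants. The handler below is a hypothetical sketch, not code from any of the files listed; it only illustrates the branch that most of the call sites above perform.

#include <linux/bio.h>

/*
 * Hypothetical submit_bio-style handler.  bio_data_dir() evaluates to
 * WRITE (1) for write-type operations and READ (0) otherwise, so the
 * two branches cover the data-transfer direction of the request.
 */
static void demo_submit_bio(struct bio *bio)
{
	if (bio_data_dir(bio) == WRITE) {
		/* data moves from the bio's pages to the backing store */
	} else {
		/* data moves from the backing store into the bio's pages */
	}
	bio_endio(bio);
}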