
Searched refs:req_op (Results 1 – 25 of 85) sorted by relevance


/linux-6.15/drivers/block/null_blk/
null_blk.h
134 blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
138 blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
147 blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
166 enum req_op op, sector_t sector, sector_t nr_sectors) in null_process_zoned_cmd()
trace.h
44 __field_struct(enum req_op, op)
49 __entry->op = req_op(blk_mq_rq_from_pdu(cmd));
main.c
1291 op_is_write(req_op(rq)), sector, in null_handle_data_transfer()
1362 blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op, in null_handle_memory_backed()
1379 if (!dev->memory_backed && req_op(rq) == REQ_OP_READ) { in nullb_zero_read_cmd_buffer()
1413 blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op, in null_process_cmd()
1433 sector_t nr_sectors, enum req_op op) in null_handle_cmd()
1598 cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req), in null_poll()
1693 null_handle_cmd(cmd, sector, nr_sectors, req_op(rq)); in null_queue_rq()
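
The null_blk hits show the canonical driver pattern: the queue_rq path reads the operation once with req_op() and dispatches on it (null_handle_cmd, lines 1433 and 1693). Below is a minimal sketch of that dispatch; the function name is illustrative, not the real null_blk helper, and the per-op handling is elided.

    #include <linux/blk-mq.h>
    #include <linux/blk_types.h>

    /* Illustrative dispatch in the style of null_handle_cmd(): branch on
     * the request's operation; unknown ops are rejected, not ignored. */
    static blk_status_t demo_process_cmd(struct request *rq)
    {
            switch (req_op(rq)) {
            case REQ_OP_READ:
            case REQ_OP_WRITE:
                    /* data path: blk_rq_pos()/blk_rq_sectors() give the range */
                    return BLK_STS_OK;
            case REQ_OP_FLUSH:
            case REQ_OP_DISCARD:
            case REQ_OP_WRITE_ZEROES:
                    return BLK_STS_OK;
            default:
                    return BLK_STS_NOTSUPP;
            }
    }
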
/linux-6.15/block/
blk.h
156 if (req_op(rq) == REQ_OP_FLUSH) in rq_mergeable()
159 if (req_op(rq) == REQ_OP_WRITE_ZEROES) in rq_mergeable()
162 if (req_op(rq) == REQ_OP_ZONE_APPEND) in rq_mergeable()
183 if (req_op(req) == REQ_OP_DISCARD && in blk_discard_mergable()
191 if (req_op(rq) == REQ_OP_DISCARD) in blk_rq_get_max_segments()
199 enum req_op op = req_op(rq); in blk_queue_get_max_sectors()
483 if (req_op(rq) == REQ_OP_ZONE_APPEND || in blk_zone_update_request_bio()
blk-merge.c
602 req_op(rq) == REQ_OP_DISCARD || in blk_rq_get_max_sectors()
603 req_op(rq) == REQ_OP_SECURE_ERASE) in blk_rq_get_max_sectors()
619 if (req_op(req) == REQ_OP_DISCARD) in ll_new_hw_segment()
790 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); in blk_account_io_merge_request()
792 in_flight[op_is_write(req_op(req))]); in blk_account_io_merge_request()
830 if (req_op(req) != req_op(next)) in attempt_merge()
945 if (req_op(rq) != bio_op(bio)) in blk_rq_merge_ok()
979 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]); in blk_account_io_merge_bio()
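
blk.h and blk-merge.c use req_op() to gate merging: rq_mergeable() excludes FLUSH, WRITE_ZEROES, and ZONE_APPEND outright, and attempt_merge()/blk_rq_merge_ok() (lines 830 and 945) refuse to combine I/O with differing operations. A hedged sketch of that gate; the real checks also cover queue limits, integrity, and crypto contexts.

    #include <linux/blk-mq.h>
    #include <linux/bio.h>

    /* Sketch of the op-equality gate from blk-merge.c: a bio may only be
     * merged into a request carrying the same operation. */
    static bool demo_merge_ok(struct request *rq, struct bio *bio)
    {
            if (req_op(rq) != bio_op(bio))
                    return false;   /* e.g. never merge READ with WRITE */
            return true;            /* real code checks much more here  */
    }
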
blk-map.c
160 bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq)); in bio_copy_user_iov()
267 bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq)); in blk_rq_map_bio_alloc()
726 bio->bi_opf |= req_op(rq); in blk_rq_map_kern()
blk-core.c
124 inline const char *blk_op_str(enum req_op op) in blk_op_str()
1030 unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op, in bdev_start_io_acct()
1054 void bdev_end_io_acct(struct block_device *bdev, enum req_op op, in bdev_end_io_acct()
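
blk-core.c carries blk_op_str() plus the bdev accounting entry points that take an explicit enum req_op. A hedged usage sketch for a bio-based driver follows; the trailing parameters (a jiffies start time, a completed-sector count) are assumed from the mainline 6.x signatures, which the truncated lines above do not show.

    #include <linux/blkdev.h>
    #include <linux/bio.h>

    /* Sketch: wrap a driver's internal I/O with the accounting helpers
     * matched at blk-core.c:1030/1054. */
    static void demo_account_io(struct block_device *bdev, struct bio *bio)
    {
            unsigned long start = bdev_start_io_acct(bdev, bio_op(bio), jiffies);

            /* ... perform the transfer ... */

            bdev_end_io_acct(bdev, bio_op(bio), bio_sectors(bio), start);
    }
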
/linux-6.15/include/linux/
blk_types.h
325 enum req_op { enum
431 static inline enum req_op bio_op(const struct bio *bio) in bio_op()
472 static inline bool op_is_zone_mgmt(enum req_op op) in op_is_zone_mgmt()
485 static inline int op_stat_group(enum req_op op) in op_stat_group()
blk-mq.h
210 static inline enum req_op req_op(const struct request *req) in req_op() function
227 #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
230 (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
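
These are the definition sites: enum req_op (blk_types.h:325) enumerates the operations, req_op() (blk-mq.h:210) recovers the op from a request, and the rq_data_dir()/DMA-direction macros derive direction from it. A simplified restatement of the accessor, dropping the kernel's __force sparse annotations: the op lives in the low bits of cmd_flags, and odd op values are writes, which op_is_write() tests.

    /* Simplified from include/linux/blk-mq.h: REQ_OP_MASK strips the
     * flag bits, leaving the bare operation. */
    static inline enum req_op req_op(const struct request *req)
    {
            return req->cmd_flags & REQ_OP_MASK;
    }

    /* Direction falls out of the enum encoding (odd ops write data): */
    #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
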
/linux-6.15/drivers/block/rnbd/
rnbd-proto.h
261 switch (req_op(rq)) { in rq_to_rnbd_flags()
282 (__force u32)req_op(rq), in rq_to_rnbd_flags()
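
rnbd translates the block-layer op into an on-wire flag in rq_to_rnbd_flags() (rnbd-proto.h:261), logging the raw value with a __force cast when the op is unsupported (line 282). A sketch of that translation shape; the wire constants below are hypothetical stand-ins, not the real rnbd protocol values.

    #include <linux/blk-mq.h>
    #include <linux/errno.h>

    /* Hypothetical wire encoding; rnbd-proto.h defines the real one. */
    enum demo_wire_op { DEMO_WIRE_READ, DEMO_WIRE_WRITE, DEMO_WIRE_DISCARD };

    static int demo_rq_to_wire(struct request *rq, u32 *flags)
    {
            switch (req_op(rq)) {
            case REQ_OP_READ:
                    *flags = DEMO_WIRE_READ;
                    return 0;
            case REQ_OP_WRITE:
                    *flags = DEMO_WIRE_WRITE;
                    return 0;
            case REQ_OP_DISCARD:
                    *flags = DEMO_WIRE_DISCARD;
                    return 0;
            default:
                    return -EOPNOTSUPP; /* unknown ops never hit the wire */
            }
    }
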
/linux-6.15/fs/zonefs/
trace.h
24 enum req_op op),
29 __field(enum req_op, op)
zonefs.h
260 int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op);
/linux-6.15/fs/
direct-io.c
169 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_refill_pages()
246 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_complete()
336 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_end_aio()
427 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_submit()
502 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in dio_bio_complete()
606 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in get_more_blocks()
789 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in submit_page_section()
906 const enum req_op dio_op = dio->opf & REQ_OP_MASK; in do_direct_IO()
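
fs/direct-io.c stores a combined blk_opf_t in dio->opf and re-derives the bare operation with the same mask in every function above. A minimal sketch of the op/flags split, assuming only the standard layout where REQ_OP_MASK covers the operation bits:

    #include <linux/blk_types.h>

    /* The op and the request flags share one blk_opf_t word; masking
     * separates them, exactly the idiom repeated through direct-io.c. */
    static bool demo_is_sync_write(blk_opf_t opf)
    {
            const enum req_op op = opf & REQ_OP_MASK;   /* bare operation  */
            const blk_opf_t flags = opf & ~REQ_OP_MASK; /* remaining flags */

            return op_is_write(op) && (flags & REQ_SYNC);
    }
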
/linux-6.15/arch/um/drivers/
ubd_kern.c
451 if (req_op(io_req->req) == REQ_OP_DISCARD) in ubd_end_request()
453 else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES) in ubd_end_request()
1182 if (req_op(req->req) == REQ_OP_READ) { in cowify_req()
1202 enum req_op op = req_op(req); in ubd_map_req()
1265 enum req_op op = req_op(req); in ubd_submit_request()
1302 switch (req_op(req)) { in ubd_queue_rq()
1420 if (req_op(req->req) == REQ_OP_FLUSH) { in do_io()
1441 switch (req_op(req->req)) { in do_io()
/linux-6.15/include/trace/events/
nilfs2.h
195 enum req_op mode),
207 __field_struct(enum req_op, mode)
/linux-6.15/drivers/block/
ublk_drv.c
405 switch (req_op(req)) { in ublk_setup_iod_zoned()
944 (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN); in ublk_need_unmap_req()
1023 enum req_op op = req_op(req); in ublk_setup_iod()
1030 switch (req_op(req)) { in ublk_setup_iod()
1081 if (!io->res && req_op(req) == REQ_OP_READ) in __ublk_complete_rq()
1095 if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE && in __ublk_complete_rq()
1096 req_op(req) != REQ_OP_DRV_IN) in __ublk_complete_rq()
1610 if (req_op(req) == REQ_OP_ZONE_APPEND) in ublk_commit_completion()
2113 req_op(req) == REQ_OP_READ)) in __ublk_ch_uring_cmd()
2222 if ((req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN) && in ublk_check_ubuf_dir()
[all …]
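
ublk proxies requests to a userspace daemon, so it repeatedly classifies ops by data direction: READ and DRV_IN move data toward userspace and need the buffer copied back or unmapped (lines 944 and 2222). A sketch of that predicate in the spirit of ublk_need_unmap_req(); the function name is illustrative.

    #include <linux/blk-mq.h>

    /* True when completion data flows to the userspace buffer. */
    static bool demo_op_fills_user_buffer(struct request *req)
    {
            return req_op(req) == REQ_OP_READ ||
                   req_op(req) == REQ_OP_DRV_IN;
    }
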
xen-blkfront.c
566 if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard) in blkif_queue_discard_req()
771 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA); in blkif_queue_rw_req()
783 if (req_op(req) == REQ_OP_FLUSH || in blkif_queue_rw_req()
784 (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) { in blkif_queue_rw_req()
876 if (unlikely(req_op(req) == REQ_OP_DISCARD || in blkif_queue_request()
877 req_op(req) == REQ_OP_SECURE_ERASE)) in blkif_queue_request()
915 if (unlikely(req_op(qd->rq) == REQ_OP_FLUSH && !info->feature_flush)) in blkif_queue_rq()
2085 if (req_op(shadow[j].request) == REQ_OP_FLUSH || in blkfront_resume()
2086 req_op(shadow[j].request) == REQ_OP_DISCARD || in blkfront_resume()
2087 req_op(shadow[j].request) == REQ_OP_SECURE_ERASE || in blkfront_resume()
ps3disk.c
168 switch (req_op(req)) { in ps3disk_do_request()
231 if (req_op(req) == REQ_OP_FLUSH) { in ps3disk_interrupt()
/linux-6.15/fs/xfs/
xfs_bio_io.c
18 enum req_op op) in xfs_rw_bdev()
xfs_linux.h
217 char *data, enum req_op op);
/linux-6.15/drivers/mmc/core/
queue.c
46 switch (req_op(req)) { in mmc_cqe_issue_type()
67 if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE) in mmc_issue_type()
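
The MMC queue code uses req_op() to classify issue types: only READ and WRITE are data transfers (queue.c:67); other ops take the command path. A hedged sketch of that classification, with an illustrative helper name:

    #include <linux/blk-mq.h>

    /* Only reads and writes carry a data payload on MMC. */
    static bool demo_is_data_op(struct request *req)
    {
            return req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE;
    }
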
/linux-6.15/drivers/crypto/hisilicon/sec2/
sec_crypto.c
271 ctx->req_op->buf_unmap(ctx, req); in sec_req_cb()
273 ctx->req_op->callback(ctx, req, err); in sec_req_cb()
1217 ret = ctx->req_op->buf_map(ctx, req); in sec_request_transfer()
1221 ctx->req_op->do_transfer(ctx, req); in sec_request_transfer()
1223 ret = ctx->req_op->bd_fill(ctx, req); in sec_request_transfer()
1230 ctx->req_op->buf_unmap(ctx, req); in sec_request_transfer()
1236 ctx->req_op->buf_unmap(ctx, req); in sec_request_untransfer()
1757 ret = ctx->req_op->bd_send(ctx, req); in sec_process()
1834 ctx->req_op = &sec_skcipher_req_ops; in sec_skcipher_ctx_init()
1867 ctx->req_op = &sec_aead_req_ops; in sec_aead_init()
[all …]
sec.h
136 const struct sec_req_op *req_op; member
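
Note that the SEC2 hits match a different symbol entirely: here req_op is a context member holding a per-algorithm operations table (const struct sec_req_op *, sec.h:136), not the block-layer accessor. A hypothetical reduction of that ops-table pattern, with made-up member names:

    /* Illustrative shape of the sec2 pattern: the driver stores a
     * vtable named req_op and calls through it for each request step. */
    struct demo_req_ops {
            int  (*buf_map)(void *ctx, void *req);
            void (*buf_unmap)(void *ctx, void *req);
            int  (*bd_send)(void *ctx, void *req);
    };

    struct demo_ctx {
            const struct demo_req_ops *req_op; /* bound at ctx init */
    };
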
/linux-6.15/drivers/md/
dm-rq.c
218 if (req_op(clone) == REQ_OP_DISCARD && in dm_done()
221 else if (req_op(clone) == REQ_OP_WRITE_ZEROES && in dm_done()
dm-ebs-target.c
65 static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv, in __ebs_rw_bvec()
122 static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio) in __ebs_rw_bio()
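
Device-mapper uses the op to degrade gracefully: dm_done() (dm-rq.c:218) responds to a failed DISCARD or WRITE_ZEROES clone by disabling that capability rather than erroring the device. A hedged sketch of that fallback shape; the out-parameter stands in for the real queue-limit update.

    #include <linux/blk-mq.h>

    /* On "not supported" completion of an optional op, remember that the
     * underlying device cannot do it; illustrative, not the dm code. */
    static void demo_done(struct request *clone, blk_status_t error,
                          bool *no_discard)
    {
            if (error == BLK_STS_NOTSUPP && req_op(clone) == REQ_OP_DISCARD)
                    *no_discard = true; /* stop issuing discards */
    }
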
