Lines matching refs:request_queue in block/blk-mq.c
53 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx,
104 unsigned int blk_mq_in_flight(struct request_queue *q, in blk_mq_in_flight()
114 void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, in blk_mq_in_flight_rw()
125 static bool blk_freeze_set_owner(struct request_queue *q, in blk_freeze_set_owner()
147 static bool blk_unfreeze_check_owner(struct request_queue *q) in blk_unfreeze_check_owner()
160 static bool blk_freeze_set_owner(struct request_queue *q, in blk_freeze_set_owner()
166 static bool blk_unfreeze_check_owner(struct request_queue *q) in blk_unfreeze_check_owner()
172 bool __blk_freeze_queue_start(struct request_queue *q, in __blk_freeze_queue_start()
191 void blk_freeze_queue_start(struct request_queue *q) in blk_freeze_queue_start()
198 void blk_mq_freeze_queue_wait(struct request_queue *q) in blk_mq_freeze_queue_wait()
204 int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, in blk_mq_freeze_queue_wait_timeout()
213 void blk_mq_freeze_queue_nomemsave(struct request_queue *q) in blk_mq_freeze_queue_nomemsave()
220 bool __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic) in __blk_mq_unfreeze_queue()
239 void blk_mq_unfreeze_queue_nomemrestore(struct request_queue *q) in blk_mq_unfreeze_queue_nomemrestore()
253 void blk_freeze_queue_start_non_owner(struct request_queue *q) in blk_freeze_queue_start_non_owner()
260 void blk_mq_unfreeze_queue_non_owner(struct request_queue *q) in blk_mq_unfreeze_queue_non_owner()
270 void blk_mq_quiesce_queue_nowait(struct request_queue *q) in blk_mq_quiesce_queue_nowait()
308 void blk_mq_quiesce_queue(struct request_queue *q) in blk_mq_quiesce_queue()
324 void blk_mq_unquiesce_queue(struct request_queue *q) in blk_mq_unquiesce_queue()
346 struct request_queue *q; in blk_mq_quiesce_tagset()
361 struct request_queue *q; in blk_mq_unquiesce_tagset()
372 void blk_mq_wake_waiters(struct request_queue *q) in blk_mq_wake_waiters()
382 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init()
414 struct request_queue *q = data->q; in blk_mq_rq_ctx_init()
497 struct request_queue *q = data->q; in __blk_mq_alloc_requests()
579 static struct request *blk_mq_rq_cache_fill(struct request_queue *q, in blk_mq_rq_cache_fill()
604 static struct request *blk_mq_alloc_cached_request(struct request_queue *q, in blk_mq_alloc_cached_request()
639 struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, in blk_mq_alloc_request()
672 struct request *blk_mq_alloc_request_hctx(struct request_queue *q, in blk_mq_alloc_request_hctx()
750 struct request_queue *q = rq->q; in blk_mq_finish_request()
767 struct request_queue *q = rq->q; in __blk_mq_free_request()
788 struct request_queue *q = rq->q; in blk_mq_free_request()
1156 struct request_queue *q = hctx->queue; in blk_mq_flush_tag_batch()
1337 struct request_queue *q = rq->q; in blk_mq_start_request()
1502 struct request_queue *q = rq->q; in __blk_mq_requeue_request()
1517 struct request_queue *q = rq->q; in blk_mq_requeue_request()
1536 struct request_queue *q = in blk_mq_requeue_work()
1537 container_of(work, struct request_queue, requeue_work.work); in blk_mq_requeue_work()
1571 void blk_mq_kick_requeue_list(struct request_queue *q) in blk_mq_kick_requeue_list()
1577 void blk_mq_delay_kick_requeue_list(struct request_queue *q, in blk_mq_delay_kick_requeue_list()
1614 bool blk_mq_queue_inflight(struct request_queue *q) in blk_mq_queue_inflight()
1703 struct request_queue *q = in blk_mq_timeout_work()
1704 container_of(work, struct request_queue, timeout_work); in blk_mq_timeout_work()
2049 static void blk_mq_release_budgets(struct request_queue *q, in blk_mq_release_budgets()
2086 struct request_queue *q = hctx->queue; in blk_mq_dispatch_rq_list()
2363 static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q) in blk_mq_get_sq_hctx()
2385 void blk_mq_run_hw_queues(struct request_queue *q, bool async) in blk_mq_run_hw_queues()
2413 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs) in blk_mq_delay_run_hw_queues()
2470 void blk_mq_stop_hw_queues(struct request_queue *q) in blk_mq_stop_hw_queues()
2488 void blk_mq_start_hw_queues(struct request_queue *q) in blk_mq_start_hw_queues()
2514 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) in blk_mq_start_stopped_hw_queues()
2593 struct request_queue *q = rq->q; in blk_mq_insert_request()
2679 struct request_queue *q = rq->q; in __blk_mq_issue_directly()
2820 static void __blk_mq_flush_plug_list(struct request_queue *q, in __blk_mq_flush_plug_list()
2891 struct request_queue *q; in blk_mq_flush_plug_list()
2954 static bool blk_mq_attempt_bio_merge(struct request_queue *q, in blk_mq_attempt_bio_merge()
2966 static struct request *blk_mq_get_new_requests(struct request_queue *q, in blk_mq_get_new_requests()
2995 struct request_queue *q, blk_opf_t opf) in blk_mq_peek_cached_request()
3031 static bool bio_unaligned(const struct bio *bio, struct request_queue *q) in bio_unaligned()
3057 struct request_queue *q = bdev_get_queue(bio->bi_bdev); in blk_mq_submit_bio()
3186 struct request_queue *q = rq->q; in blk_insert_cloned_request()
3805 static void blk_mq_remove_hw_queues_cpuhp(struct request_queue *q) in blk_mq_remove_hw_queues_cpuhp()
3827 static void blk_mq_add_hw_queues_cpuhp(struct request_queue *q) in blk_mq_add_hw_queues_cpuhp()
3868 static void blk_mq_exit_hctx(struct request_queue *q, in blk_mq_exit_hctx()
3893 static void blk_mq_exit_hw_queues(struct request_queue *q, in blk_mq_exit_hw_queues()
3907 static int blk_mq_init_hctx(struct request_queue *q, in blk_mq_init_hctx()
3939 blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, in blk_mq_alloc_hctx()
4005 static void blk_mq_init_cpu_queues(struct request_queue *q, in blk_mq_init_cpu_queues()
4089 static void blk_mq_map_swqueue(struct request_queue *q) in blk_mq_map_swqueue()
4211 static void queue_set_hctx_shared(struct request_queue *q, bool shared) in queue_set_hctx_shared()
4229 struct request_queue *q; in blk_mq_update_tag_set_shared()
4241 static void blk_mq_del_queue_tag_set(struct request_queue *q) in blk_mq_del_queue_tag_set()
4258 struct request_queue *q) in blk_mq_add_queue_tag_set()
4279 static int blk_mq_alloc_ctxs(struct request_queue *q) in blk_mq_alloc_ctxs()
4312 void blk_mq_release(struct request_queue *q) in blk_mq_release()
4335 struct request_queue *blk_mq_alloc_queue(struct blk_mq_tag_set *set, in blk_mq_alloc_queue()
4339 struct request_queue *q; in blk_mq_alloc_queue()
4371 void blk_mq_destroy_queue(struct request_queue *q) in blk_mq_destroy_queue()
4392 struct request_queue *q; in __blk_mq_alloc_disk()
4410 struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, in blk_mq_alloc_disk_for_queue()
4434 struct blk_mq_tag_set *set, struct request_queue *q, in blk_mq_alloc_and_init_hctx()
4468 struct request_queue *q) in __blk_mq_realloc_hw_ctxs()
4508 struct request_queue *q, bool lock) in blk_mq_realloc_hw_ctxs()
4527 struct request_queue *q) in blk_mq_init_allocated_queue()
4579 void blk_mq_exit_queue(struct request_queue *q) in blk_mq_exit_queue()
4873 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) in blk_mq_update_nr_requests()
4933 struct request_queue *q;
4942 struct request_queue *q) in blk_mq_elv_switch_none()
4972 struct request_queue *q) in blk_lookup_qe_pair()
4984 struct request_queue *q) in blk_mq_elv_switch_back()
5006 struct request_queue *q; in __blk_mq_update_nr_hw_queues()
5088 static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, in blk_hctx_poll()
5115 int blk_mq_poll(struct request_queue *q, blk_qc_t cookie, in blk_mq_poll()
5126 struct request_queue *q = rq->q; in blk_rq_poll()
5147 void blk_mq_cancel_work_sync(struct request_queue *q) in blk_mq_cancel_work_sync()
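For context, a minimal sketch of how two of the freeze helpers indexed above are typically paired around a queue reconfiguration step. This is not taken from the indexed file: mydrv_reconfigure_queue() is a hypothetical driver-side function, and only the signatures visible in the listing (blk_mq_freeze_queue_nomemsave() and blk_mq_unfreeze_queue_nomemrestore(), both taking a struct request_queue * and returning void) are relied on.

/*
 * Hedged sketch, not part of the indexed source: pair the freeze/unfreeze
 * helpers listed above around a point where the queue must be idle.
 * mydrv_reconfigure_queue() is a hypothetical driver function.
 */
#include <linux/blk-mq.h>

static void mydrv_reconfigure_queue(struct request_queue *q)
{
	/* Block new requests and wait for in-flight ones to drain. */
	blk_mq_freeze_queue_nomemsave(q);

	/* ... safe point: update driver-private queue state here ... */

	/* Allow request submission to resume. */
	blk_mq_unfreeze_queue_nomemrestore(q);
}

blk_mq_quiesce_queue() and blk_mq_unquiesce_queue(), also indexed above, are the lighter-weight alternative when dispatch only needs to be paused rather than fully drained.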