// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"

#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

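/*
 * Classify a request for CQE issue: driver-private, discard, secure-erase
 * and write-zeroes requests are issued synchronously, a flush becomes a
 * DCMD when the host supports it, and everything else (reads and writes)
 * is issued asynchronously through the CQE.
 */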
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

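/*
 * Decide how a request is issued: through the CQE when it is enabled (and
 * not operating as a host software queue), asynchronously for reads and
 * writes, and synchronously for everything else.
 */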
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (host->cqe_enabled && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

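/* Schedule the recovery worker, at most once. Called with mq->lock held. */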
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

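/*
 * Notifier invoked when a CQE request needs recovery: locate the owning
 * queue from the request and kick its recovery worker.
 */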
void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

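/*
 * Timeout handling for CQE requests: ask the host whether the request is
 * still in flight, rearming the timer (and triggering recovery if needed),
 * or complete the timeout if the request has already gone. Synchronous
 * requests are timed out by the mmc core, so only the timer is reset.
 */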
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

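/*
 * blk-mq timeout handler: while recovery is in progress, or when the host
 * is not using a real CQE, the timeout is handled elsewhere and the timer
 * is simply rearmed; otherwise defer to mmc_cqe_timed_out().
 */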
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

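/*
 * Recovery worker: with the host claimed, run CQE or blk-mq recovery as
 * appropriate, clear the recovery flag and restart the hardware queues.
 */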
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (host->cqe_enabled && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

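/* Allocate and initialise a scatterlist with sg_len entries. */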
static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

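/*
 * Derive the discard limits from the card: secure erase shares the discard
 * maximum when supported, write-zeroes is backed by trim on cards that
 * erase to zero, and the granularity follows the card's preferred erase
 * size, falling back to a single sector when that exceeds the maximum.
 */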
static void mmc_queue_setup_discard(struct mmc_card *card,
		struct queue_limits *lim)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	lim->max_hw_discard_sectors = max_discard;
	if (mmc_can_secure_erase_trim(card))
		lim->max_secure_erase_sectors = max_discard;
	if (mmc_can_trim(card) && card->erased_byte == 0)
		lim->max_write_zeroes_sectors = max_discard;

	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		lim->discard_granularity = SECTOR_SIZE;
	else
		lim->discard_granularity = card->pref_erase << 9;
}

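/*
 * A host that can merge segments at DMA map time may accept up to
 * MMC_DMA_MAP_MERGE_SEGMENTS segments instead of its native max_segs.
 */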
static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

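/* Allocate the per-request scatterlist when a request is first set up. */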
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = set->driver_data;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

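/*
 * blk-mq ->queue_rq() handler: classify the request, apply the DCMD and
 * host-software-queue backpressure rules, account the request as in-flight
 * (claiming the host for the first one) and issue it.
 */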
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		if (host->hsq_enabled && mq->in_flight[issue_type] > host->hsq_depth) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (host->cqe_enabled) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

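/*
 * Build the queue limits from the host and card capabilities and allocate
 * the gendisk and request queue backing this mmc queue.
 */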
static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
		struct mmc_card *card, unsigned int features)
{
	struct mmc_host *host = card->host;
	struct queue_limits lim = {
		.features	= features,
	};
	struct gendisk *disk;

	if (mmc_can_erase(card))
		mmc_queue_setup_discard(card, &lim);

	lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
		lim.logical_block_size = card->ext_csd.data_sector_size;
	else
		lim.logical_block_size = 512;

	WARN_ON_ONCE(lim.logical_block_size != 512 &&
		     lim.logical_block_size != 4096);

	/*
	 * Setting a virt_boundary implicitly sets a max_segment_size, so try
	 * to set the hardware one here.
	 */
	if (host->can_dma_map_merge) {
		lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
		lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
	} else {
		lim.max_segment_size =
			round_down(host->max_seg_size, lim.logical_block_size);
		lim.max_segments = host->max_segs;
	}

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		lim.features |= BLK_FEAT_STABLE_WRITES;

	disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
	if (IS_ERR(disk))
		return disk;
	mq->queue = disk->queue;

	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	if (mmc_dev(host)->dma_parms)
		dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);

	mmc_crypto_setup_queue(mq->queue, host);
	return disk;
}

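/* The host advertises that segments may be merged when DMA mapping. */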
static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @features: block layer features (BLK_FEAT_*)
 *
 * Initialise an MMC card request queue.
 */
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		unsigned int features)
{
	struct mmc_host *host = card->host;
	struct gendisk *disk;
	int ret;

	mq->card = card;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (host->cqe_enabled && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops, so
	 * host->can_dma_map_merge must be set first for
	 * mmc_get_max_segments() to return the correct number of segments.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ERR_PTR(ret);

	disk = mmc_alloc_disk(mq, card, features);
	if (IS_ERR(disk))
		blk_mq_free_tag_set(&mq->tag_set);
	return disk;
}

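/*
 * Quiesce the queue so that no new requests are dispatched while the card
 * is suspended.
 */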
void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

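/* Tear down the request queue when the card goes away. */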
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	/*
	 * If the recovery completes the last (and only remaining) request in
	 * the queue, and the card has been removed, we could end up here with
	 * the recovery not quite finished yet, so cancel it.
	 */
	cancel_work_sync(&mq->recovery_work);

	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(req, mqrq->sg);
}