// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/backing-dev.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "crypto.h"
#include "host.h"

#define MMC_DMA_MAP_MERGE_SEGMENTS	512

static inline bool mmc_cqe_dcmd_busy(struct mmc_queue *mq)
{
	/* Allow only 1 DCMD at a time */
	return mq->in_flight[MMC_ISSUE_DCMD];
}

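/*
 * Clear the DCMD-busy flag once no DCMD is in flight any more, so that a
 * flush that was backed off with BLK_STS_RESOURCE can be dispatched again.
 */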
void mmc_cqe_check_busy(struct mmc_queue *mq)
{
	if ((mq->cqe_busy & MMC_CQE_DCMD_BUSY) && !mmc_cqe_dcmd_busy(mq))
		mq->cqe_busy &= ~MMC_CQE_DCMD_BUSY;
}

static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}

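/*
 * Classify a request for the CQE case: driver-private and discard-type
 * requests are issued synchronously, a flush may use the dedicated DCMD slot
 * when the host supports it, and ordinary reads/writes take the asynchronous
 * (tagged) path.
 */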
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}

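/*
 * Decide how a request will be issued: apply the CQE rules above when a
 * command queue engine is enabled (and not the host software queue),
 * otherwise issue reads and writes asynchronously and everything else
 * synchronously.
 */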
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (host->cqe_enabled && !host->hsq_enabled)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

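/* Flag recovery and kick the recovery worker once; called under mq->lock. */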
static void __mmc_cqe_recovery_notifier(struct mmc_queue *mq)
{
	if (!mq->recovery_needed) {
		mq->recovery_needed = true;
		schedule_work(&mq->recovery_work);
	}
}

void mmc_cqe_recovery_notifier(struct mmc_request *mrq)
{
	struct mmc_queue_req *mqrq = container_of(mrq, struct mmc_queue_req,
						  brq.mrq);
	struct request *req = mmc_queue_req_to_req(mqrq);
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	unsigned long flags;

	spin_lock_irqsave(&mq->lock, flags);
	__mmc_cqe_recovery_notifier(mq);
	spin_unlock_irqrestore(&mq->lock, flags);
}

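/*
 * Block timeout for a CQE request: ask the host controller whether the
 * request is still in flight; if so, optionally schedule recovery and rearm
 * the timer, otherwise the request has already completed.
 */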
static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
{
	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
	struct mmc_request *mrq = &mqrq->brq.mrq;
	struct mmc_queue *mq = req->q->queuedata;
	struct mmc_host *host = mq->card->host;
	enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
	bool recovery_needed = false;

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
	case MMC_ISSUE_DCMD:
		if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
			if (recovery_needed)
				mmc_cqe_recovery_notifier(mrq);
			return BLK_EH_RESET_TIMER;
		}
		/* The request has gone already */
		return BLK_EH_DONE;
	default:
		/* Timeout is handled by mmc core */
		return BLK_EH_RESET_TIMER;
	}
}

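/*
 * blk-mq timeout handler: when recovery is already pending, or no CQE is in
 * use, just rearm the timer and leave the timeout to the MMC core; otherwise
 * defer to the CQE handling above.
 */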
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req)
{
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	unsigned long flags;
	bool ignore_tout;

	spin_lock_irqsave(&mq->lock, flags);
	ignore_tout = mq->recovery_needed || !host->cqe_enabled || host->hsq_enabled;
	spin_unlock_irqrestore(&mq->lock, flags);

	return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
}

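/*
 * Recovery work: runs with the card claimed, performs CQE or ordinary blk-mq
 * recovery as appropriate, then restarts the hardware queues.
 */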
static void mmc_mq_recovery_handler(struct work_struct *work)
{
	struct mmc_queue *mq = container_of(work, struct mmc_queue,
					    recovery_work);
	struct request_queue *q = mq->queue;
	struct mmc_host *host = mq->card->host;

	mmc_get_card(mq->card, &mq->ctx);

	mq->in_recovery = true;

	if (host->cqe_enabled && !host->hsq_enabled)
		mmc_blk_cqe_recovery(mq);
	else
		mmc_blk_mq_recovery(mq);

	mq->in_recovery = false;

	spin_lock_irq(&mq->lock);
	mq->recovery_needed = false;
	spin_unlock_irq(&mq->lock);

	if (host->hsq_enabled)
		host->cqe_ops->cqe_recovery_finish(host);

	mmc_put_card(mq->card, &mq->ctx);

	blk_mq_run_hw_queues(q, true);
}

static struct scatterlist *mmc_alloc_sg(unsigned short sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

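/*
 * Translate the card's erase/trim capabilities into block layer queue limits
 * for discard, secure erase and write-zeroes.
 */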
static void mmc_queue_setup_discard(struct mmc_card *card,
				    struct queue_limits *lim)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	lim->max_hw_discard_sectors = max_discard;
	if (mmc_can_secure_erase_trim(card))
		lim->max_secure_erase_sectors = max_discard;
	if (mmc_can_trim(card) && card->erased_byte == 0)
		lim->max_write_zeroes_sectors = max_discard;

	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		lim->discard_granularity = SECTOR_SIZE;
	else
		lim->discard_granularity = card->pref_erase << 9;
}

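/*
 * If the DMA map can merge segments (e.g. behind an IOMMU), the scatterlist
 * may carry many more entries than host->max_segs, since they are coalesced
 * before the host controller sees them.
 */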
static unsigned short mmc_get_max_segments(struct mmc_host *host)
{
	return host->can_dma_map_merge ? MMC_DMA_MAP_MERGE_SEGMENTS :
					 host->max_segs;
}

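/* Allocate the per-request scatterlist when the blk-mq tag set is created. */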
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_queue *mq = set->driver_data;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(mmc_get_max_segments(host), GFP_KERNEL);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

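/*
 * blk-mq ->queue_rq() handler: classify the request, back off with
 * BLK_STS_RESOURCE while recovery is pending or the dispatch slot is busy,
 * claim the card for the first in-flight request, then hand the request to
 * the block driver to issue.
 */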
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card, cqe_retune_ok;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(&mq->lock);

	if (mq->recovery_needed || mq->busy) {
		spin_unlock_irq(&mq->lock);
		return BLK_STS_RESOURCE;
	}

	switch (issue_type) {
	case MMC_ISSUE_DCMD:
		if (mmc_cqe_dcmd_busy(mq)) {
			mq->cqe_busy |= MMC_CQE_DCMD_BUSY;
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	case MMC_ISSUE_ASYNC:
		if (host->hsq_enabled && mq->in_flight[issue_type] > host->hsq_depth) {
			spin_unlock_irq(&mq->lock);
			return BLK_STS_RESOURCE;
		}
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	/* Parallel dispatch of requests is not supported at the moment */
	mq->busy = true;

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);
	cqe_retune_ok = (mmc_cqe_qcnt(mq) == 1);

	spin_unlock_irq(&mq->lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	if (host->cqe_enabled) {
		host->retune_now = host->need_retune && cqe_retune_ok &&
				   !host->hold_retune;
	}

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(&mq->lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		mq->busy = false;
		spin_unlock_irq(&mq->lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	} else {
		WRITE_ONCE(mq->busy, false);
	}

	return ret;
}

static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq	= mmc_mq_queue_rq,
	.init_request	= mmc_mq_init_request,
	.exit_request	= mmc_mq_exit_request,
	.complete	= mmc_blk_mq_complete,
	.timeout	= mmc_mq_timed_out,
};

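/*
 * Build the queue limits from host and card capabilities, allocate the
 * gendisk and set up the work items and locks used by the issue and
 * completion paths.
 */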
static struct gendisk *mmc_alloc_disk(struct mmc_queue *mq,
				      struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	struct queue_limits lim = { };
	struct gendisk *disk;

	if (mmc_can_erase(card))
		mmc_queue_setup_discard(card, &lim);

	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
		lim.bounce = BLK_BOUNCE_HIGH;

	lim.max_hw_sectors = min(host->max_blk_count, host->max_req_size / 512);

	if (mmc_card_mmc(card) && card->ext_csd.data_sector_size)
		lim.logical_block_size = card->ext_csd.data_sector_size;
	else
		lim.logical_block_size = 512;

	WARN_ON_ONCE(lim.logical_block_size != 512 &&
		     lim.logical_block_size != 4096);

	/*
	 * Setting a virt_boundary implicitly sets a max_segment_size, so try
	 * to set the hardware one here.
	 */
	if (host->can_dma_map_merge) {
		lim.virt_boundary_mask = dma_get_merge_boundary(mmc_dev(host));
		lim.max_segments = MMC_DMA_MAP_MERGE_SEGMENTS;
	} else {
		lim.max_segment_size =
			round_down(host->max_seg_size, lim.logical_block_size);
		lim.max_segments = host->max_segs;
	}

	disk = blk_mq_alloc_disk(&mq->tag_set, &lim, mq);
	if (IS_ERR(disk))
		return disk;
	mq->queue = disk->queue;

	if (mmc_host_is_spi(host) && host->use_spi_crc)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, mq->queue);
	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue);

	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));

	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);

	mmc_crypto_setup_queue(mq->queue, host);
	return disk;
}

static inline bool mmc_merge_capable(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_MERGE_CAPABLE;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 *
 * Initialise an MMC card request queue.
 */
struct gendisk *mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	struct gendisk *disk;
	int ret;

	mq->card = card;

	spin_lock_init(&mq->lock);

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = &mmc_mq_ops;
	/*
	 * The queue depth for CQE must match the hardware because the request
	 * tag is used to index the hardware queue.
	 */
	if (host->cqe_enabled && !host->hsq_enabled)
		mq->tag_set.queue_depth =
			min_t(int, card->ext_csd.cmdq_depth, host->cqe_qdepth);
	else
		mq->tag_set.queue_depth = MMC_QUEUE_DEPTH;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	/*
	 * Since blk_mq_alloc_tag_set() calls .init_request() of mmc_mq_ops,
	 * host->can_dma_map_merge must be set beforehand so that
	 * mmc_get_max_segments() returns the right segment count.
	 */
	if (mmc_merge_capable(host) &&
	    host->max_segs < MMC_DMA_MAP_MERGE_SEGMENTS &&
	    dma_get_merge_boundary(mmc_dev(host)))
		host->can_dma_map_merge = 1;
	else
		host->can_dma_map_merge = 0;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ERR_PTR(ret);

	disk = mmc_alloc_disk(mq, card);
	if (IS_ERR(disk))
		blk_mq_free_tag_set(&mq->tag_set);
	return disk;
}

void mmc_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

void mmc_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

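/*
 * Tear down the queue on card removal or driver unbind: make sure the queue
 * is running, stop recovery, free the tag set and flush any leftover
 * completion work.
 */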
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	/*
	 * The legacy code handled the possibility of being suspended,
	 * so do that here too.
	 */
	if (blk_queue_quiesced(q))
		blk_mq_unquiesce_queue(q);

	/*
	 * If the recovery completes the last (and only remaining) request in
	 * the queue, and the card has been removed, we could end up here with
	 * the recovery not quite finished yet, so cancel it.
	 */
	cancel_work_sync(&mq->recovery_work);

	blk_mq_free_tag_set(&mq->tag_set);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}