/*
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>

#include "queue.h"
#include "block.h"
#include "core.h"
#include "card.h"
#include "host.h"

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	if (mq && mmc_card_removed(mq->card))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;
	req_to_mmc_queue_req(req)->retries = 0;

	return BLKPREP_OK;
}

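/*
 * Classify a request for the blk-mq path: reads and writes are issued
 * asynchronously; everything else (e.g. discard or flush) is issued
 * synchronously.
 */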
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}

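/*
 * Block layer timeout handler. Timeouts are handled by the mmc core (see
 * the comment in mmc_mq_queue_rq()), so just keep the request alive by
 * resetting the block layer timer.
 */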
static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
						 bool reserved)
{
	return BLK_EH_RESET_TIMER;
}

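/*
 * Dispatch thread for the legacy (non-blk-mq) path: fetch requests off
 * the queue and hand them to mmc_blk_issue_rq(), sleeping whenever the
 * dispatch queue is empty.
 */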
static int mmc_queue_thread(void *d)
{
	struct mmc_queue *mq = d;
	struct request_queue *q = mq->queue;
	struct mmc_context_info *cntx = &mq->card->host->context_info;

	current->flags |= PF_MEMALLOC;

	down(&mq->thread_sem);
	do {
		struct request *req;

		spin_lock_irq(q->queue_lock);
		set_current_state(TASK_INTERRUPTIBLE);
		req = blk_fetch_request(q);
		mq->asleep = false;
		cntx->is_waiting_last_req = false;
		cntx->is_new_req = false;
		if (!req) {
			/*
			 * Dispatch queue is empty so set flags for
			 * mmc_request_fn() to wake us up.
			 */
			if (mq->qcnt)
				cntx->is_waiting_last_req = true;
			else
				mq->asleep = true;
		}
		spin_unlock_irq(q->queue_lock);

		if (req || mq->qcnt) {
			set_current_state(TASK_RUNNING);
			mmc_blk_issue_rq(mq, req);
			cond_resched();
		} else {
			if (kthread_should_stop()) {
				set_current_state(TASK_RUNNING);
				break;
			}
			up(&mq->thread_sem);
			schedule();
			down(&mq->thread_sem);
		}
	} while (1);
	up(&mq->thread_sem);

	return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
	struct mmc_queue *mq = q->queuedata;
	struct request *req;
	struct mmc_context_info *cntx;

	if (!mq) {
		while ((req = blk_fetch_request(q)) != NULL) {
			req->rq_flags |= RQF_QUIET;
			__blk_end_request_all(req, BLK_STS_IOERR);
		}
		return;
	}

	cntx = &mq->card->host->context_info;

	if (cntx->is_waiting_last_req) {
		cntx->is_new_req = true;
		wake_up_interruptible(&cntx->wait);
	}

	if (mq->asleep)
		wake_up_process(mq->thread);
}

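/* Allocate a scatterlist of sg_len entries and initialize it */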
static struct scatterlist *mmc_alloc_sg(int sg_len, gfp_t gfp)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(*sg), gfp);
	if (sg)
		sg_init_table(sg, sg_len);

	return sg;
}

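/*
 * Enable discard support on the queue, sized from the card's erase
 * capabilities. The granularity follows the card's preferred erase size.
 */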
static void mmc_queue_setup_discard(struct request_queue *q,
				    struct mmc_card *card)
{
	unsigned max_discard;

	max_discard = mmc_calc_max_discard(card);
	if (!max_discard)
		return;

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	blk_queue_max_discard_sectors(q, max_discard);
	q->limits.discard_granularity = card->pref_erase << 9;
	/* granularity must not be greater than max. discard */
	if (card->pref_erase > max_discard)
		q->limits.discard_granularity = 0;
	if (mmc_can_secure_erase_trim(card))
		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_request() - initialize the MMC-specific per-request data
 * @q: the request queue
 * @req: the request
 * @gfp: memory allocation policy
 */
static int __mmc_init_request(struct mmc_queue *mq, struct request *req,
			      gfp_t gfp)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
	struct mmc_card *card = mq->card;
	struct mmc_host *host = card->host;

	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
	if (!mq_rq->sg)
		return -ENOMEM;

	return 0;
}

static int mmc_init_request(struct request_queue *q, struct request *req,
			    gfp_t gfp)
{
	return __mmc_init_request(q->queuedata, req, gfp);
}

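/* Free the scatterlist allocated by __mmc_init_request() */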
static void mmc_exit_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);

	kfree(mq_rq->sg);
	mq_rq->sg = NULL;
}

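/* blk-mq wrappers around the per-request init/exit helpers above */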
static int mmc_mq_init_request(struct blk_mq_tag_set *set, struct request *req,
			       unsigned int hctx_idx, unsigned int numa_node)
{
	return __mmc_init_request(set->driver_data, req, GFP_KERNEL);
}

static void mmc_mq_exit_request(struct blk_mq_tag_set *set, struct request *req,
				unsigned int hctx_idx)
{
	struct mmc_queue *mq = set->driver_data;

	mmc_exit_request(mq->queue, req);
}

/*
 * We use BLK_MQ_F_BLOCKING and have only 1 hardware queue, which means requests
 * will not be dispatched in parallel.
 */
static blk_status_t mmc_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct mmc_queue *mq = q->queuedata;
	struct mmc_card *card = mq->card;
	enum mmc_issue_type issue_type;
	enum mmc_issued issued;
	bool get_card;
	blk_status_t ret;

	if (mmc_card_removed(mq->card)) {
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}

	issue_type = mmc_issue_type(mq, req);

	spin_lock_irq(q->queue_lock);

	switch (issue_type) {
	case MMC_ISSUE_ASYNC:
		break;
	default:
		/*
		 * Timeouts are handled by mmc core, and we don't have a host
		 * API to abort requests, so we can't handle the timeout anyway.
		 * However, when the timeout happens, blk_mq_complete_request()
		 * no longer works (to stop the request disappearing under us).
		 * To avoid racing with that, set a large timeout.
		 */
		req->timeout = 600 * HZ;
		break;
	}

	mq->in_flight[issue_type] += 1;
	get_card = (mmc_tot_in_flight(mq) == 1);

	spin_unlock_irq(q->queue_lock);

	if (!(req->rq_flags & RQF_DONTPREP)) {
		req_to_mmc_queue_req(req)->retries = 0;
		req->rq_flags |= RQF_DONTPREP;
	}

	if (get_card)
		mmc_get_card(card, &mq->ctx);

	blk_mq_start_request(req);

	issued = mmc_blk_mq_issue_rq(mq, req);

	switch (issued) {
	case MMC_REQ_BUSY:
		ret = BLK_STS_RESOURCE;
		break;
	case MMC_REQ_FAILED_TO_START:
		ret = BLK_STS_IOERR;
		break;
	default:
		ret = BLK_STS_OK;
		break;
	}

	if (issued != MMC_REQ_STARTED) {
		bool put_card = false;

		spin_lock_irq(q->queue_lock);
		mq->in_flight[issue_type] -= 1;
		if (mmc_tot_in_flight(mq) == 0)
			put_card = true;
		spin_unlock_irq(q->queue_lock);
		if (put_card)
			mmc_put_card(card, &mq->ctx);
	}

	return ret;
}

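/* blk-mq operations for MMC block device queues */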
static const struct blk_mq_ops mmc_mq_ops = {
	.queue_rq = mmc_mq_queue_rq,
	.init_request = mmc_mq_init_request,
	.exit_request = mmc_mq_exit_request,
	.complete = mmc_blk_mq_complete,
	.timeout = mmc_mq_timed_out,
};

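/*
 * Apply the host controller's limits (bounce limit, max sectors, segment
 * count and segment size) to the request queue. Shared by the legacy and
 * blk-mq setup paths.
 */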
static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
	if (mmc_can_erase(card))
		mmc_queue_setup_discard(mq->queue, card);

	blk_queue_bounce_limit(mq->queue, limit);
	blk_queue_max_hw_sectors(mq->queue,
		min(host->max_blk_count, host->max_req_size / 512));
	blk_queue_max_segments(mq->queue, host->max_segs);
	blk_queue_max_segment_size(mq->queue, host->max_seg_size);

	/* Initialize thread_sem even if it is not used */
	sema_init(&mq->thread_sem, 1);

	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);

	mutex_init(&mq->complete_lock);

	init_waitqueue_head(&mq->wait);
}

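/* Allocate the blk-mq tag set and request queue (one hardware queue) */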
static int mmc_mq_init_queue(struct mmc_queue *mq, int q_depth,
			     const struct blk_mq_ops *mq_ops, spinlock_t *lock)
{
	int ret;

	memset(&mq->tag_set, 0, sizeof(mq->tag_set));
	mq->tag_set.ops = mq_ops;
	mq->tag_set.queue_depth = q_depth;
	mq->tag_set.numa_node = NUMA_NO_NODE;
	mq->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE |
			    BLK_MQ_F_BLOCKING;
	mq->tag_set.nr_hw_queues = 1;
	mq->tag_set.cmd_size = sizeof(struct mmc_queue_req);
	mq->tag_set.driver_data = mq;

	ret = blk_mq_alloc_tag_set(&mq->tag_set);
	if (ret)
		return ret;

	mq->queue = blk_mq_init_queue(&mq->tag_set);
	if (IS_ERR(mq->queue)) {
		ret = PTR_ERR(mq->queue);
		goto free_tag_set;
	}

	mq->queue->queue_lock = lock;
	mq->queue->queuedata = mq;

	return 0;

free_tag_set:
	blk_mq_free_tag_set(&mq->tag_set);

	return ret;
}

/* Set queue depth to get a reasonable value for q->nr_requests */
#define MMC_QUEUE_DEPTH 64

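/* Set up the blk-mq queue, the request timeout and the queue limits */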
static int mmc_mq_init(struct mmc_queue *mq, struct mmc_card *card,
		       spinlock_t *lock)
{
	int q_depth;
	int ret;

	q_depth = MMC_QUEUE_DEPTH;

	ret = mmc_mq_init_queue(mq, q_depth, &mmc_mq_ops, lock);
	if (ret)
		return ret;

	blk_queue_rq_timeout(mq->queue, 60 * HZ);

	mmc_setup_queue(mq, card);

	return 0;
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
		   spinlock_t *lock, const char *subname)
{
	struct mmc_host *host = card->host;
	int ret = -ENOMEM;

	mq->card = card;

	if (mmc_host_use_blk_mq(host))
		return mmc_mq_init(mq, card, lock);

	mq->queue = blk_alloc_queue(GFP_KERNEL);
	if (!mq->queue)
		return -ENOMEM;
	mq->queue->queue_lock = lock;
	mq->queue->request_fn = mmc_request_fn;
	mq->queue->init_rq_fn = mmc_init_request;
	mq->queue->exit_rq_fn = mmc_exit_request;
	mq->queue->cmd_size = sizeof(struct mmc_queue_req);
	mq->queue->queuedata = mq;
	mq->qcnt = 0;
	ret = blk_init_allocated_queue(mq->queue);
	if (ret) {
		blk_cleanup_queue(mq->queue);
		return ret;
	}

	blk_queue_prep_rq(mq->queue, mmc_prep_request);

	mmc_setup_queue(mq, card);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
		host->index, subname ? subname : "");

	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto cleanup_queue;
	}

	return 0;

cleanup_queue:
	blk_cleanup_queue(mq->queue);
	return ret;
}

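/* blk-mq suspend: quiesce dispatch, then wait out requests in flight */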
static void mmc_mq_queue_suspend(struct mmc_queue *mq)
{
	blk_mq_quiesce_queue(mq->queue);

	/*
	 * The host remains claimed while there are outstanding requests, so
	 * simply claiming and releasing here ensures there are none.
	 */
	mmc_claim_host(mq->card->host);
	mmc_release_host(mq->card->host);
}

static void mmc_mq_queue_resume(struct mmc_queue *mq)
{
	blk_mq_unquiesce_queue(mq->queue);
}

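/*
 * Legacy suspend: stop the dispatch queue and take thread_sem so that
 * mmc_queue_thread() cannot issue new requests while we are suspended.
 */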
static void __mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (!mq->suspended) {
		mq->suspended = true;

		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);

		down(&mq->thread_sem);
	}
}

static void __mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (mq->suspended) {
		mq->suspended = false;

		up(&mq->thread_sem);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}

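/**
 * mmc_cleanup_queue - shut down and release an MMC request queue
 * @mq: MMC queue to clean up
 *
 * Terminate the dispatch thread (legacy) or flush any pending completion
 * work (blk-mq), then release the block layer queue.
 */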
void mmc_cleanup_queue(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;
	unsigned long flags;

	if (q->mq_ops) {
		/*
		 * The legacy code handled the possibility of being suspended,
		 * so do that here too.
		 */
		if (blk_queue_quiesced(q))
			blk_mq_unquiesce_queue(q);
		goto out_cleanup;
	}

	/* Make sure the queue isn't suspended, as that will deadlock */
	mmc_queue_resume(mq);

	/* Then terminate our worker thread */
	kthread_stop(mq->thread);

	/* Empty the queue */
	spin_lock_irqsave(q->queue_lock, flags);
	q->queuedata = NULL;
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

out_cleanup:
	blk_cleanup_queue(q);

	/*
	 * A request can be completed before the next request, potentially
	 * leaving a complete_work with nothing to do. Such a work item might
	 * still be queued at this point. Flush it.
	 */
	flush_work(&mq->complete_work);

	mq->card = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	if (q->mq_ops)
		mmc_mq_queue_suspend(mq);
	else
		__mmc_queue_suspend(mq);
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
	struct request_queue *q = mq->queue;

	if (q->mq_ops)
		mmc_mq_queue_resume(mq);
	else
		__mmc_queue_resume(mq);
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
	struct request *req = mmc_queue_req_to_req(mqrq);

	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
}