block: Fix secure erase
[linux-2.6-block.git] drivers/mmc/card/queue.c
/*
 * linux/drivers/mmc/card/queue.c
 *
 * Copyright (C) 2003 Russell King, All Rights Reserved.
 * Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ      65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        /*
         * We only like normal block requests, discards and secure erases.
         */
        if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
            req_op(req) != REQ_OP_SECURE_ERASE) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}
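
/*
 * For illustration (not part of the original file): the secure-erase leg
 * of the filter above matters because such requests normally enter the
 * block layer from user space via the BLKSECDISCARD ioctl on the block
 * device node.  A minimal user-space sketch, assuming a device such as
 * /dev/mmcblk0, offsets valid for that card, and the usual <fcntl.h>,
 * <sys/ioctl.h> and <linux/fs.h> includes:
 *
 *      uint64_t range[2] = { 0, 1024 * 1024 };   range[0] = start, range[1] = length in bytes
 *      int fd = open("/dev/mmcblk0", O_WRONLY);
 *      if (ioctl(fd, BLKSECDISCARD, &range))
 *              perror("BLKSECDISCARD");
 *
 * Without the REQ_OP_SECURE_ERASE check, such a request would be killed
 * here with BLKPREP_KILL.
 */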

static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
                        set_current_state(TASK_RUNNING);
                        mq->issue_fn(mq, req);
                        cond_resched();
                        if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
                                mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
                                continue; /* fetch again */
                        }

                        /*
                         * The current request becomes the previous request
                         * and vice versa.  Special requests have already
                         * been completed, so do not carry them over as the
                         * previous request.
                         */
                        if (mmc_req_is_special(req))
                                mq->mqrq_cur->req = NULL;

                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        swap(mq->mqrq_prev, mq->mqrq_cur);
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler. This is called for any queue on a
 * particular host. When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it. This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        unsigned long flags;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;
        if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
                /*
                 * A new MMC request arrived while the MMC thread may be
                 * blocked waiting for the previous request to complete,
                 * with no current request fetched.
                 */
                spin_lock_irqsave(&cntx->lock, flags);
                if (cntx->is_waiting_last_req) {
                        cntx->is_new_req = true;
                        wake_up_interruptible(&cntx->wait);
                }
                spin_unlock_irqrestore(&cntx->lock, flags);
        } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
                wake_up_process(mq->thread);
}

static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc(sizeof(struct scatterlist) * sg_len, GFP_KERNEL);
        if (!sg)
                *err = -ENOMEM;
        else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}

static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        blk_queue_max_discard_sectors(q, max_discard);
        if (card->erased_byte == 0 && !mmc_can_discard(card))
                q->limits.discard_zeroes_data = 1;
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_cur->bounce_buf) {
                                pr_warn("%s: unable to allocate bounce cur buffer\n",
                                        mmc_card_name(card));
                        } else {
                                mqrq_prev->bounce_buf =
                                        kmalloc(bouncesz, GFP_KERNEL);
                                if (!mqrq_prev->bounce_buf) {
                                        pr_warn("%s: unable to allocate bounce prev buffer\n",
                                                mmc_card_name(card));
                                        kfree(mqrq_cur->bounce_buf);
                                        mqrq_cur->bounce_buf = NULL;
                                }
                        }
                }

                if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_cur->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;
                }
        }
#endif

        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;

                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_bounce_sg;
        }

        return 0;
 free_bounce_sg:
        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;
        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;
        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        blk_cleanup_queue(mq->queue);
        return ret;
}
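
/*
 * Usage sketch (an assumption about the caller, not taken from this file):
 * the MMC block driver is expected to pair mmc_init_queue() with
 * mmc_cleanup_queue() roughly as below.  The names md, md->lock and
 * mmc_blk_issue_rq belong to the caller and are shown only for
 * illustration:
 *
 *      spin_lock_init(&md->lock);
 *      ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *      if (ret)
 *              return ret;
 *      md->queue.issue_fn = mmc_blk_issue_rq;
 *      md->queue.data = md;
 *      ...
 *      mmc_cleanup_queue(&md->queue);          on card removal
 */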

void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;
        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;

        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;

        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;

        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
        int ret = 0;

        mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_cur->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
                        mmc_card_name(card));
                ret = -ENOMEM;
                goto out;
        }

        mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_prev->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
                        mmc_card_name(card));
                kfree(mqrq_cur->packed);
                mqrq_cur->packed = NULL;
                ret = -ENOMEM;
                goto out;
        }

        INIT_LIST_HEAD(&mqrq_cur->packed->list);
        INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
        return ret;
}

void mmc_packed_clean(struct mmc_queue *mq)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        kfree(mqrq_cur->packed);
        mqrq_cur->packed = NULL;
        kfree(mqrq_prev->packed);
        mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests. This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}
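
/*
 * Pairing sketch (hedged; the handler and field names below are
 * assumptions for illustration): the suspend/resume helpers are meant to
 * bracket bus suspend, e.g. from the block driver's PM callbacks:
 *
 *      static int mmc_blk_suspend(struct mmc_card *card)
 *      {
 *              struct mmc_blk_data *md = mmc_get_drvdata(card);
 *
 *              if (md)
 *                      mmc_queue_suspend(&md->queue);
 *              return 0;
 *      }
 *
 * with the matching resume path calling mmc_queue_resume(&md->queue).
 */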

static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                                            struct mmc_packed *packed,
                                            struct scatterlist *sg,
                                            enum mmc_packed_type cmd_type)
{
        struct scatterlist *__sg = sg;
        unsigned int sg_len = 0;
        struct request *req;

        if (mmc_packed_wr(cmd_type)) {
                unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
                unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
                unsigned int len, remain, offset = 0;
                u8 *buf = (u8 *)packed->cmd_hdr;

                remain = hdr_sz;
                do {
                        len = min(remain, max_seg_sz);
                        sg_set_buf(__sg, buf + offset, len);
                        offset += len;
                        remain -= len;
                        sg_unmark_end(__sg++);
                        sg_len++;
                } while (remain);
        }

        list_for_each_entry(req, &packed->list, queuelist) {
                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                __sg = sg + (sg_len - 1);
                sg_unmark_end(__sg++);
        }
        sg_mark_end(sg + (sg_len - 1));
        return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        enum mmc_packed_type cmd_type;
        int i;

        cmd_type = mqrq->cmd_type;

        if (!mqrq->bounce_buf) {
                if (mmc_packed_cmd(cmd_type))
                        return mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                       mqrq->sg, cmd_type);
                else
                        return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
        }

        BUG_ON(!mqrq->bounce_sg);

        if (mmc_packed_cmd(cmd_type))
                sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                 mqrq->bounce_sg, cmd_type);
        else
                sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                          mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                            mqrq->bounce_buf, mqrq->sg[0].length);
}
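
/*
 * Flow sketch (hedged; caller-side names such as brq are assumptions shown
 * for illustration): a transfer that goes through the bounce buffer is
 * expected to wrap the helpers above in this order:
 *
 *      brq->data.sg = mqrq->sg;
 *      brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 *      mmc_queue_bounce_pre(mqrq);             copy write payload into the bounce buffer
 *      mmc_wait_for_req(mq->card->host, &brq->mrq);
 *      mmc_queue_bounce_post(mqrq);            copy read payload back to the request pages
 */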