/*
 *  linux/drivers/mmc/core/core.c
 *
 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
 *  SD support Copyright (C) 2004 Ian Molton, All Rights Reserved.
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/leds.h>
#include <linux/scatterlist.h>
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/suspend.h>
#include <linux/fault-inject.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
#include <linux/mmc/slot-gpio.h>

#include "core.h"
#include "bus.h"
#include "host.h"
#include "sdio_bus.h"
#include "pwrseq.h"

#include "mmc_ops.h"
#include "sd_ops.h"
#include "sdio_ops.h"

/* If the device is not responding */
#define MMC_CORE_TIMEOUT_MS     (10 * 60 * 1000) /* 10 minute timeout */

/*
 * Background operations can take a long time, depending on the housekeeping
 * operations the card has to perform.
 */
#define MMC_BKOPS_MAX_TIMEOUT   (4 * 60 * 1000) /* max time to wait in ms */

static struct workqueue_struct *workqueue;
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };

/*
 * Enabling software CRCs on the data blocks can be a significant (30%)
 * performance cost, and for other reasons may not always be desired.
 * So we allow it to be disabled.
 */
bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);

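/*
 * Illustrative note (not part of the original file): because use_spi_crc
 * is a module parameter, it can typically be turned off with the module
 * option "use_spi_crc=0", or, when the core is built in, with something
 * like "mmc_core.use_spi_crc=0" on the kernel command line; the exact
 * parameter path depends on the build configuration.
 */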
/*
 * Internal function. Schedule delayed work in the MMC work queue.
 */
static int mmc_schedule_delayed_work(struct delayed_work *work,
                                     unsigned long delay)
{
        return queue_delayed_work(workqueue, work, delay);
}

/*
 * Internal function. Flush all scheduled work from the MMC work queue.
 */
static void mmc_flush_scheduled_work(void)
{
        flush_workqueue(workqueue);
}

#ifdef CONFIG_FAIL_MMC_REQUEST

/*
 * Internal function. Inject random data errors.
 * If mmc_data is NULL no errors are injected.
 */
static void mmc_should_fail_request(struct mmc_host *host,
                                    struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        struct mmc_data *data = mrq->data;
        static const int data_errors[] = {
                -ETIMEDOUT,
                -EILSEQ,
                -EIO,
        };

        if (!data)
                return;

        if (cmd->error || data->error ||
            !should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
                return;

        data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
        data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
}

#else /* CONFIG_FAIL_MMC_REQUEST */

static inline void mmc_should_fail_request(struct mmc_host *host,
                                           struct mmc_request *mrq)
{
}

#endif /* CONFIG_FAIL_MMC_REQUEST */

/**
 * mmc_request_done - finish processing an MMC request
 * @host: MMC host which completed request
 * @mrq: MMC request which completed
 *
 * MMC drivers should call this function when they have completed
 * their processing of a request.
 */
void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
        struct mmc_command *cmd = mrq->cmd;
        int err = cmd->error;

        if (err && cmd->retries && mmc_host_is_spi(host)) {
                if (cmd->resp[0] & R1_SPI_ILLEGAL_COMMAND)
                        cmd->retries = 0;
        }

        if (err && cmd->retries && !mmc_card_removed(host->card)) {
                /*
                 * Request starter must handle retries - see
                 * mmc_wait_for_req_done().
                 */
                if (mrq->done)
                        mrq->done(mrq);
        } else {
                mmc_should_fail_request(host, mrq);

                led_trigger_event(host->led, LED_OFF);

                if (mrq->sbc) {
                        pr_debug("%s: req done <CMD%u>: %d: %08x %08x %08x %08x\n",
                                mmc_hostname(host), mrq->sbc->opcode,
                                mrq->sbc->error,
                                mrq->sbc->resp[0], mrq->sbc->resp[1],
                                mrq->sbc->resp[2], mrq->sbc->resp[3]);
                }

                pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n",
                        mmc_hostname(host), cmd->opcode, err,
                        cmd->resp[0], cmd->resp[1],
                        cmd->resp[2], cmd->resp[3]);

                if (mrq->data) {
                        pr_debug("%s:     %d bytes transferred: %d\n",
                                mmc_hostname(host),
                                mrq->data->bytes_xfered, mrq->data->error);
                }

                if (mrq->stop) {
                        pr_debug("%s:     (CMD%u): %d: %08x %08x %08x %08x\n",
                                mmc_hostname(host), mrq->stop->opcode,
                                mrq->stop->error,
                                mrq->stop->resp[0], mrq->stop->resp[1],
                                mrq->stop->resp[2], mrq->stop->resp[3]);
                }

                if (mrq->done)
                        mrq->done(mrq);

                mmc_host_clk_release(host);
        }
}

EXPORT_SYMBOL(mmc_request_done);

static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        /* Assumes host controller has been runtime resumed by mmc_claim_host */
        err = mmc_retune(host);
        if (err) {
                mrq->cmd->error = err;
                mmc_request_done(host, mrq);
                return;
        }

        host->ops->request(host, mrq);
}

static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned int i, sz;
        struct scatterlist *sg;
#endif
        mmc_retune_hold(host);

        if (mmc_card_removed(host->card))
                return -ENOMEDIUM;

        if (mrq->sbc) {
                pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
                         mmc_hostname(host), mrq->sbc->opcode,
                         mrq->sbc->arg, mrq->sbc->flags);
        }

        pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
                 mmc_hostname(host), mrq->cmd->opcode,
                 mrq->cmd->arg, mrq->cmd->flags);

        if (mrq->data) {
                pr_debug("%s:     blksz %d blocks %d flags %08x "
                        "tsac %d ms nsac %d\n",
                        mmc_hostname(host), mrq->data->blksz,
                        mrq->data->blocks, mrq->data->flags,
                        mrq->data->timeout_ns / 1000000,
                        mrq->data->timeout_clks);
        }

        if (mrq->stop) {
                pr_debug("%s:     CMD%u arg %08x flags %08x\n",
                         mmc_hostname(host), mrq->stop->opcode,
                         mrq->stop->arg, mrq->stop->flags);
        }

        WARN_ON(!host->claimed);

        mrq->cmd->error = 0;
        mrq->cmd->mrq = mrq;
        if (mrq->sbc) {
                mrq->sbc->error = 0;
                mrq->sbc->mrq = mrq;
        }
        if (mrq->data) {
                BUG_ON(mrq->data->blksz > host->max_blk_size);
                BUG_ON(mrq->data->blocks > host->max_blk_count);
                BUG_ON(mrq->data->blocks * mrq->data->blksz >
                        host->max_req_size);

#ifdef CONFIG_MMC_DEBUG
                sz = 0;
                for_each_sg(mrq->data->sg, sg, mrq->data->sg_len, i)
                        sz += sg->length;
                BUG_ON(sz != mrq->data->blocks * mrq->data->blksz);
#endif

                mrq->cmd->data = mrq->data;
                mrq->data->error = 0;
                mrq->data->mrq = mrq;
                if (mrq->stop) {
                        mrq->data->stop = mrq->stop;
                        mrq->stop->error = 0;
                        mrq->stop->mrq = mrq;
                }
        }
        mmc_host_clk_hold(host);
        led_trigger_event(host->led, LED_FULL);
        __mmc_start_request(host, mrq);

        return 0;
}

/**
 * mmc_start_bkops - start BKOPS for supported cards
 * @card: MMC card to start BKOPS
 * @from_exception: A flag to indicate if this function was
 *                  called due to an exception raised by the card
 *
 * Start background operations whenever requested.
 * When the urgent BKOPS bit is set in a R1 command response
 * then background operations should be started immediately.
 */
void mmc_start_bkops(struct mmc_card *card, bool from_exception)
{
        int err;
        int timeout;
        bool use_busy_signal;

        BUG_ON(!card);

        if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
                return;

        err = mmc_read_bkops_status(card);
        if (err) {
                pr_err("%s: Failed to read bkops status: %d\n",
                       mmc_hostname(card->host), err);
                return;
        }

        if (!card->ext_csd.raw_bkops_status)
                return;

        if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
            from_exception)
                return;

        mmc_claim_host(card->host);
        if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
                timeout = MMC_BKOPS_MAX_TIMEOUT;
                use_busy_signal = true;
        } else {
                timeout = 0;
                use_busy_signal = false;
        }

        mmc_retune_hold(card->host);

        err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                        EXT_CSD_BKOPS_START, 1, timeout,
                        use_busy_signal, true, false);
        if (err) {
                pr_warn("%s: Error %d starting bkops\n",
                        mmc_hostname(card->host), err);
                mmc_retune_release(card->host);
                goto out;
        }

        /*
         * For urgent BKOPS status (LEVEL_2 and higher), BKOPS is
         * executed synchronously; otherwise the operation is left
         * running in the background.
         */
        if (!use_busy_signal)
                mmc_card_set_doing_bkops(card);
        else
                mmc_retune_release(card->host);
out:
        mmc_release_host(card->host);
}
EXPORT_SYMBOL(mmc_start_bkops);

/*
 * mmc_wait_data_done() - done callback for data request
 * @mrq: done data request
 *
 * Wakes up mmc context, passed as a callback to host controller driver
 */
static void mmc_wait_data_done(struct mmc_request *mrq)
{
        mrq->host->context_info.is_done_rcv = true;
        wake_up_interruptible(&mrq->host->context_info.wait);
}

static void mmc_wait_done(struct mmc_request *mrq)
{
        complete(&mrq->completion);
}

/*
 * __mmc_start_data_req() - starts data request
 * @host: MMC host to start the request
 * @mrq: data request to start
 *
 * Sets the done callback to be called when request is completed by the card.
 * Starts data mmc request execution
 */
static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        mrq->done = mmc_wait_data_done;
        mrq->host = host;

        err = mmc_start_request(host, mrq);
        if (err) {
                mrq->cmd->error = err;
                mmc_wait_data_done(mrq);
        }

        return err;
}

static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        init_completion(&mrq->completion);
        mrq->done = mmc_wait_done;

        err = mmc_start_request(host, mrq);
        if (err) {
                mrq->cmd->error = err;
                complete(&mrq->completion);
        }

        return err;
}

/*
 * mmc_wait_for_data_req_done() - wait for request completed
 * @host: MMC host to prepare the command.
 * @mrq: MMC request to wait for
 *
 * Blocks the MMC context until the host controller acks the end of the data
 * request execution, or until a new request notification arrives from the
 * block layer. Handles command retries.
 *
 * Returns enum mmc_blk_status after checking errors.
 */
static int mmc_wait_for_data_req_done(struct mmc_host *host,
                                      struct mmc_request *mrq,
                                      struct mmc_async_req *next_req)
{
        struct mmc_command *cmd;
        struct mmc_context_info *context_info = &host->context_info;
        int err;
        unsigned long flags;

        while (1) {
                wait_event_interruptible(context_info->wait,
                                (context_info->is_done_rcv ||
                                 context_info->is_new_req));
                spin_lock_irqsave(&context_info->lock, flags);
                context_info->is_waiting_last_req = false;
                spin_unlock_irqrestore(&context_info->lock, flags);
                if (context_info->is_done_rcv) {
                        context_info->is_done_rcv = false;
                        context_info->is_new_req = false;
                        cmd = mrq->cmd;

                        if (!cmd->error || !cmd->retries ||
                            mmc_card_removed(host->card)) {
                                err = host->areq->err_check(host->card,
                                                            host->areq);
                                break; /* return err */
                        } else {
                                mmc_retune_recheck(host);
                                pr_info("%s: req failed (CMD%u): %d, retrying...\n",
                                        mmc_hostname(host),
                                        cmd->opcode, cmd->error);
                                cmd->retries--;
                                cmd->error = 0;
                                __mmc_start_request(host, mrq);
                                continue; /* wait for done/new event again */
                        }
                } else if (context_info->is_new_req) {
                        context_info->is_new_req = false;
                        if (!next_req)
                                return MMC_BLK_NEW_REQUEST;
                }
        }
        mmc_retune_release(host);
        return err;
}

static void mmc_wait_for_req_done(struct mmc_host *host,
                                  struct mmc_request *mrq)
{
        struct mmc_command *cmd;

        while (1) {
                wait_for_completion(&mrq->completion);

                cmd = mrq->cmd;

                /*
                 * If host has timed out waiting for the sanitize
                 * to complete, card might be still in programming state
                 * so let's try to bring the card out of programming
                 * state.
                 */
                if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
                        if (!mmc_interrupt_hpi(host->card)) {
                                pr_warn("%s: %s: Interrupted sanitize\n",
                                        mmc_hostname(host), __func__);
                                cmd->error = 0;
                                break;
                        } else {
                                pr_err("%s: %s: Failed to interrupt sanitize\n",
                                       mmc_hostname(host), __func__);
                        }
                }
                if (!cmd->error || !cmd->retries ||
                    mmc_card_removed(host->card))
                        break;

                mmc_retune_recheck(host);

                pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
                         mmc_hostname(host), cmd->opcode, cmd->error);
                cmd->retries--;
                cmd->error = 0;
                __mmc_start_request(host, mrq);
        }

        mmc_retune_release(host);
}

/**
 * mmc_pre_req - Prepare for a new request
 * @host: MMC host to prepare command
 * @mrq: MMC request to prepare for
 * @is_first_req: true if there is no previous started request
 *                that may run in parallel to this call, otherwise false
 *
 * mmc_pre_req() is called prior to mmc_start_req() to let the
 * host prepare for the new request. Preparation of a request may be
 * performed while another request is running on the host.
 */
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
                        bool is_first_req)
{
        if (host->ops->pre_req) {
                mmc_host_clk_hold(host);
                host->ops->pre_req(host, mrq, is_first_req);
                mmc_host_clk_release(host);
        }
}

/**
 * mmc_post_req - Post process a completed request
 * @host: MMC host to post process command
 * @mrq: MMC request to post process for
 * @err: Error, if non zero, clean up any resources made in pre_req
 *
 * Let the host post process a completed request. Post processing of
 * a request may be performed while another request is running.
 */
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
                         int err)
{
        if (host->ops->post_req) {
                mmc_host_clk_hold(host);
                host->ops->post_req(host, mrq, err);
                mmc_host_clk_release(host);
        }
}

/**
 * mmc_start_req - start a non-blocking request
 * @host: MMC host to start command
 * @areq: async request to start
 * @error: out parameter returns 0 for success, otherwise non zero
 *
 * Start a new MMC custom command request for a host.
 * If there is an ongoing async request, wait for completion
 * of that request, then start the new one and return.
 * Does not wait for the new request to complete.
 *
 * Returns the completed request, NULL in case of none completed.
 * Wait for an ongoing request (previously started) to complete and
 * return the completed request. If there is no ongoing request, NULL
 * is returned without waiting. NULL is not an error condition.
 */
struct mmc_async_req *mmc_start_req(struct mmc_host *host,
                                    struct mmc_async_req *areq, int *error)
{
        int err = 0;
        int start_err = 0;
        struct mmc_async_req *data = host->areq;

        /* Prepare a new request */
        if (areq)
                mmc_pre_req(host, areq->mrq, !host->areq);

        if (host->areq) {
                err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq);
                if (err == MMC_BLK_NEW_REQUEST) {
                        if (error)
                                *error = err;
                        /*
                         * The previous request was not completed,
                         * nothing to return
                         */
                        return NULL;
                }
                /*
                 * Check BKOPS urgency for each R1 response
                 */
                if (host->card && mmc_card_mmc(host->card) &&
                    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) ||
                     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) &&
                    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) {

                        /* Cancel the prepared request */
                        if (areq)
                                mmc_post_req(host, areq->mrq, -EINVAL);

                        mmc_start_bkops(host->card, true);

                        /* prepare the request again */
                        if (areq)
                                mmc_pre_req(host, areq->mrq, !host->areq);
                }
        }

        if (!err && areq)
                start_err = __mmc_start_data_req(host, areq->mrq);

        if (host->areq)
                mmc_post_req(host, host->areq->mrq, 0);

        /* Cancel a prepared request if it was not started. */
        if ((err || start_err) && areq)
                mmc_post_req(host, areq->mrq, -EINVAL);

        if (err)
                host->areq = NULL;
        else
                host->areq = areq;

        if (error)
                *error = err;
        return data;
}
EXPORT_SYMBOL(mmc_start_req);
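
/*
 * Illustrative usage sketch (not part of the original file): a block
 * driver pipelines requests by handing the next async request to
 * mmc_start_req() while the previous one is still in flight. The names
 * prepare_areq() and finish_blk_request() are hypothetical helpers.
 *
 *      struct mmc_async_req *prev;
 *      int error;
 *
 *      areq = prepare_areq(next_blk_request);  // fill mrq and err_check
 *      prev = mmc_start_req(host, areq, &error);
 *      if (prev)
 *              finish_blk_request(prev, error); // previous req completed
 */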

/**
 * mmc_wait_for_req - start a request and wait for completion
 * @host: MMC host to start command
 * @mrq: MMC request to start
 *
 * Start a new MMC custom command request for a host, and wait
 * for the command to complete. Does not attempt to parse the
 * response.
 */
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
        __mmc_start_req(host, mrq);
        mmc_wait_for_req_done(host, mrq);
}
EXPORT_SYMBOL(mmc_wait_for_req);

/**
 * mmc_interrupt_hpi - Issue a High Priority Interrupt
 * @card: the MMC card associated with the HPI transfer
 *
 * Issues a High Priority Interrupt, then polls the card status
 * until the card is out of the programming state.
 */
int mmc_interrupt_hpi(struct mmc_card *card)
{
        int err;
        u32 status;
        unsigned long prg_wait;

        BUG_ON(!card);

        if (!card->ext_csd.hpi_en) {
                pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
                return 1;
        }

        mmc_claim_host(card->host);
        err = mmc_send_status(card, &status);
        if (err) {
                pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
                goto out;
        }

        switch (R1_CURRENT_STATE(status)) {
        case R1_STATE_IDLE:
        case R1_STATE_READY:
        case R1_STATE_STBY:
        case R1_STATE_TRAN:
                /*
                 * In idle and transfer states, HPI is not needed and the caller
                 * can issue the next intended command immediately
                 */
                goto out;
        case R1_STATE_PRG:
                break;
        default:
                /* In all other states, it's illegal to issue HPI */
                pr_debug("%s: HPI cannot be sent. Card state=%d\n",
                        mmc_hostname(card->host), R1_CURRENT_STATE(status));
                err = -EINVAL;
                goto out;
        }

        err = mmc_send_hpi_cmd(card, &status);
        if (err)
                goto out;

        prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
        do {
                err = mmc_send_status(card, &status);

                if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
                        break;
                if (time_after(jiffies, prg_wait))
                        err = -ETIMEDOUT;
        } while (!err);

out:
        mmc_release_host(card->host);
        return err;
}
EXPORT_SYMBOL(mmc_interrupt_hpi);

/**
 * mmc_wait_for_cmd - start a command and wait for completion
 * @host: MMC host to start command
 * @cmd: MMC command to start
 * @retries: maximum number of retries
 *
 * Start a new MMC command for a host, and wait for the command
 * to complete. Return any error that occurred while the command
 * was executing. Do not attempt to parse the response.
 */
int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)
{
        struct mmc_request mrq = {NULL};

        WARN_ON(!host->claimed);

        memset(cmd->resp, 0, sizeof(cmd->resp));
        cmd->retries = retries;

        mrq.cmd = cmd;
        cmd->data = NULL;

        mmc_wait_for_req(host, &mrq);

        return cmd->error;
}

EXPORT_SYMBOL(mmc_wait_for_cmd);

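/*
 * Illustrative sketch (not from the original source): issuing a simple
 * SEND_STATUS (CMD13) through mmc_wait_for_cmd(). The host must already
 * be claimed; the RCA shift follows the usual CMD13 argument layout.
 *
 *      struct mmc_command cmd = {0};
 *
 *      cmd.opcode = MMC_SEND_STATUS;
 *      cmd.arg = card->rca << 16;
 *      cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 *      err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 *      if (!err)
 *              status = cmd.resp[0];
 */
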
/**
 * mmc_stop_bkops - stop ongoing BKOPS
 * @card: MMC card to check BKOPS
 *
 * Send HPI command to stop ongoing background operations to
 * allow rapid servicing of foreground operations, e.g. read/
 * writes. Wait until the card comes out of the programming state
 * to avoid errors in servicing read/write requests.
 */
int mmc_stop_bkops(struct mmc_card *card)
{
        int err = 0;

        BUG_ON(!card);
        err = mmc_interrupt_hpi(card);

        /*
         * If err is EINVAL, we can't issue an HPI.
         * It should complete the BKOPS.
         */
        if (!err || (err == -EINVAL)) {
                mmc_card_clr_doing_bkops(card);
                mmc_retune_release(card->host);
                err = 0;
        }

        return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);

int mmc_read_bkops_status(struct mmc_card *card)
{
        int err;
        u8 *ext_csd;

        mmc_claim_host(card->host);
        err = mmc_get_ext_csd(card, &ext_csd);
        mmc_release_host(card->host);
        if (err)
                return err;

        card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
        card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
        kfree(ext_csd);
        return 0;
}
EXPORT_SYMBOL(mmc_read_bkops_status);

/**
 * mmc_set_data_timeout - set the timeout for a data command
 * @data: data phase for command
 * @card: the MMC card associated with the data transfer
 *
 * Computes the data timeout parameters according to the
 * correct algorithm given the card type.
 */
void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
        unsigned int mult;

        /*
         * SDIO cards only define an upper 1 s limit on access.
         */
        if (mmc_card_sdio(card)) {
                data->timeout_ns = 1000000000;
                data->timeout_clks = 0;
                return;
        }

        /*
         * SD cards use a 100 multiplier rather than 10
         */
        mult = mmc_card_sd(card) ? 100 : 10;

        /*
         * Scale up the multiplier (and therefore the timeout) by
         * the r2w factor for writes.
         */
        if (data->flags & MMC_DATA_WRITE)
                mult <<= card->csd.r2w_factor;

        data->timeout_ns = card->csd.tacc_ns * mult;
        data->timeout_clks = card->csd.tacc_clks * mult;

        /*
         * SD cards also have an upper limit on the timeout.
         */
        if (mmc_card_sd(card)) {
                unsigned int timeout_us, limit_us;

                timeout_us = data->timeout_ns / 1000;
                if (mmc_host_clk_rate(card->host))
                        timeout_us += data->timeout_clks * 1000 /
                                (mmc_host_clk_rate(card->host) / 1000);

                if (data->flags & MMC_DATA_WRITE)
                        /*
                         * The MMC spec says "It is strongly recommended
                         * for hosts to implement more than 500ms
                         * timeout value even if the card indicates
                         * the 250ms maximum busy length." Even the
                         * previous value of 300ms is known to be
                         * insufficient for some cards.
                         */
                        limit_us = 3000000;
                else
                        limit_us = 100000;

                /*
                 * SDHC cards always use these fixed values.
                 */
                if (timeout_us > limit_us || mmc_card_blockaddr(card)) {
                        data->timeout_ns = limit_us * 1000;
                        data->timeout_clks = 0;
                }

                /* assign limit value if invalid */
                if (timeout_us == 0)
                        data->timeout_ns = limit_us * 1000;
        }

        /*
         * Some cards require longer data read timeout than indicated in CSD.
         * Address this by setting the read timeout to a "reasonably high"
         * value. For the cards tested, 300ms has proven enough. If necessary,
         * this value can be increased if other problematic cards require this.
         */
        if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
                data->timeout_ns = 300000000;
                data->timeout_clks = 0;
        }

        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
         * continuous stream of data until the internal logic
         * overflowed.
         */
        if (mmc_host_is_spi(card->host)) {
                if (data->flags & MMC_DATA_WRITE) {
                        if (data->timeout_ns < 1000000000)
                                data->timeout_ns = 1000000000;  /* 1s */
                } else {
                        if (data->timeout_ns < 100000000)
                                data->timeout_ns = 100000000;   /* 100ms */
                }
        }
}
EXPORT_SYMBOL(mmc_set_data_timeout);

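/*
 * Worked example (illustrative, with made-up CSD values): for an SD write
 * with tacc_ns = 1500000 (1.5 ms), tacc_clks = 0 and r2w_factor = 2,
 * mult = 100 << 2 = 400, so timeout_ns = 600 ms. That is well under the
 * 3 s limit_us cap for writes, so it is kept as computed (for a
 * standard-capacity card; block-addressed SDHC always takes the fixed
 * limit instead).
 */
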
/**
 * mmc_align_data_size - pads a transfer size to a more optimal value
 * @card: the MMC card associated with the data transfer
 * @sz: original transfer size
 *
 * Pads the original data size with a number of extra bytes in
 * order to avoid controller bugs and/or performance hits
 * (e.g. some controllers revert to PIO for certain sizes).
 *
 * Returns the improved size, which might be unmodified.
 *
 * Note that this function is only relevant when issuing a
 * single scatter gather entry.
 */
unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
{
        /*
         * FIXME: We don't have a system for the controller to tell
         * the core about its problems yet, so for now we just 32-bit
         * align the size.
         */
        sz = ((sz + 3) / 4) * 4;

        return sz;
}
EXPORT_SYMBOL(mmc_align_data_size);

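/*
 * Illustrative example (not from the original source): the expression
 * above is plain round-up-to-a-multiple-of-4 arithmetic, e.g.
 *
 *      mmc_align_data_size(card, 13);  // ((13 + 3) / 4) * 4 == 16
 *      mmc_align_data_size(card, 16);  // already aligned, returns 16
 */
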
/**
 * __mmc_claim_host - exclusively claim a host
 * @host: mmc host to claim
 * @abort: whether or not the operation should be aborted
 *
 * Claim a host for a set of operations. If @abort is non null and
 * dereferences to a non-zero value then this will return prematurely with
 * that non-zero value without acquiring the lock. Returns zero
 * with the lock held otherwise.
 */
int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
{
        DECLARE_WAITQUEUE(wait, current);
        unsigned long flags;
        int stop;
        bool pm = false;

        might_sleep();

        add_wait_queue(&host->wq, &wait);
        spin_lock_irqsave(&host->lock, flags);
        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                stop = abort ? atomic_read(abort) : 0;
                if (stop || !host->claimed || host->claimer == current)
                        break;
                spin_unlock_irqrestore(&host->lock, flags);
                schedule();
                spin_lock_irqsave(&host->lock, flags);
        }
        set_current_state(TASK_RUNNING);
        if (!stop) {
                host->claimed = 1;
                host->claimer = current;
                host->claim_cnt += 1;
                if (host->claim_cnt == 1)
                        pm = true;
        } else
                wake_up(&host->wq);
        spin_unlock_irqrestore(&host->lock, flags);
        remove_wait_queue(&host->wq, &wait);

        if (pm)
                pm_runtime_get_sync(mmc_dev(host));

        return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);

/**
 * mmc_release_host - release a host
 * @host: mmc host to release
 *
 * Release a MMC host, allowing others to claim the host
 * for their operations.
 */
void mmc_release_host(struct mmc_host *host)
{
        unsigned long flags;

        WARN_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);
        if (--host->claim_cnt) {
                /* Release for nested claim */
                spin_unlock_irqrestore(&host->lock, flags);
        } else {
                host->claimed = 0;
                host->claimer = NULL;
                spin_unlock_irqrestore(&host->lock, flags);
                wake_up(&host->wq);
                pm_runtime_mark_last_busy(mmc_dev(host));
                pm_runtime_put_autosuspend(mmc_dev(host));
        }
}
EXPORT_SYMBOL(mmc_release_host);

/*
 * This is a helper function, which fetches a runtime pm reference for the
 * card device and also claims the host.
 */
void mmc_get_card(struct mmc_card *card)
{
        pm_runtime_get_sync(&card->dev);
        mmc_claim_host(card->host);
}
EXPORT_SYMBOL(mmc_get_card);

/*
 * This is a helper function, which releases the host and drops the runtime
 * pm reference for the card device.
 */
void mmc_put_card(struct mmc_card *card)
{
        mmc_release_host(card->host);
        pm_runtime_mark_last_busy(&card->dev);
        pm_runtime_put_autosuspend(&card->dev);
}
EXPORT_SYMBOL(mmc_put_card);

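/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller brackets card access with mmc_get_card()/mmc_put_card() so the
 * host is both runtime-resumed and exclusively claimed for the duration:
 *
 *      mmc_get_card(card);
 *      err = mmc_send_status(card, &status);
 *      mmc_put_card(card);
 */
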
/*
 * Internal function that does the actual ios call to the host driver,
 * optionally printing some debug output.
 */
static inline void mmc_set_ios(struct mmc_host *host)
{
        struct mmc_ios *ios = &host->ios;

        pr_debug("%s: clock %uHz busmode %u powermode %u cs %u Vdd %u "
                "width %u timing %u\n",
                 mmc_hostname(host), ios->clock, ios->bus_mode,
                 ios->power_mode, ios->chip_select, ios->vdd,
                 ios->bus_width, ios->timing);

        if (ios->clock > 0)
                mmc_set_ungated(host);
        host->ops->set_ios(host, ios);
}

/*
 * Control chip select pin on a host.
 */
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
        mmc_host_clk_hold(host);
        host->ios.chip_select = mode;
        mmc_set_ios(host);
        mmc_host_clk_release(host);
}

/*
 * Sets the host clock to the highest possible frequency that
 * does not exceed "hz".
 */
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
        WARN_ON(hz && hz < host->f_min);

        if (hz > host->f_max)
                hz = host->f_max;

        host->ios.clock = hz;
        mmc_set_ios(host);
}

void mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
        mmc_host_clk_hold(host);
        __mmc_set_clock(host, hz);
        mmc_host_clk_release(host);
}

1063
04566831
LW
1064#ifdef CONFIG_MMC_CLKGATE
1065/*
1066 * This gates the clock by setting it to 0 Hz.
1067 */
1068void mmc_gate_clock(struct mmc_host *host)
1069{
1070 unsigned long flags;
1071
1072 spin_lock_irqsave(&host->clk_lock, flags);
1073 host->clk_old = host->ios.clock;
1074 host->ios.clock = 0;
1075 host->clk_gated = true;
1076 spin_unlock_irqrestore(&host->clk_lock, flags);
1077 mmc_set_ios(host);
1078}
1079
1080/*
1081 * This restores the clock from gating by using the cached
1082 * clock value.
1083 */
1084void mmc_ungate_clock(struct mmc_host *host)
1085{
1086 /*
1087 * We should previously have gated the clock, so the clock shall
1088 * be 0 here! The clock may however be 0 during initialization,
1089 * when some request operations are performed before setting
1090 * the frequency. When ungate is requested in that situation
1091 * we just ignore the call.
1092 */
1093 if (host->clk_old) {
1094 BUG_ON(host->ios.clock);
1095 /* This call will also set host->clk_gated to false */
778e277c 1096 __mmc_set_clock(host, host->clk_old);
04566831
LW
1097 }
1098}
1099
1100void mmc_set_ungated(struct mmc_host *host)
1101{
1102 unsigned long flags;
1103
1104 /*
1105 * We've been given a new frequency while the clock is gated,
1106 * so make sure we regard this as ungating it.
1107 */
1108 spin_lock_irqsave(&host->clk_lock, flags);
1109 host->clk_gated = false;
1110 spin_unlock_irqrestore(&host->clk_lock, flags);
1111}
1112
1113#else
1114void mmc_set_ungated(struct mmc_host *host)
1115{
1116}
1117#endif
1118
int mmc_execute_tuning(struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        u32 opcode;
        int err;

        if (!host->ops->execute_tuning)
                return 0;

        if (mmc_card_mmc(card))
                opcode = MMC_SEND_TUNING_BLOCK_HS200;
        else
                opcode = MMC_SEND_TUNING_BLOCK;

        mmc_host_clk_hold(host);
        err = host->ops->execute_tuning(host, opcode);
        mmc_host_clk_release(host);

        if (err)
                pr_err("%s: tuning execution failed\n", mmc_hostname(host));
        else
                mmc_retune_enable(host);

        return err;
}

/*
 * Change the bus mode (open drain/push-pull) of a host.
 */
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
        mmc_host_clk_hold(host);
        host->ios.bus_mode = mode;
        mmc_set_ios(host);
        mmc_host_clk_release(host);
}

/*
 * Change data bus width of a host.
 */
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
        mmc_host_clk_hold(host);
        host->ios.bus_width = width;
        mmc_set_ios(host);
        mmc_host_clk_release(host);
}

/*
 * Set initial state after a power cycle or a hw_reset.
 */
void mmc_set_initial_state(struct mmc_host *host)
{
        mmc_retune_disable(host);

        if (mmc_host_is_spi(host))
                host->ios.chip_select = MMC_CS_HIGH;
        else
                host->ios.chip_select = MMC_CS_DONTCARE;
        host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
        host->ios.bus_width = MMC_BUS_WIDTH_1;
        host->ios.timing = MMC_TIMING_LEGACY;

        mmc_set_ios(host);
}

/**
 * mmc_vdd_to_ocrbitnum - Convert a voltage to the OCR bit number
 * @vdd: voltage (mV)
 * @low_bits: prefer low bits in boundary cases
 *
 * This function returns the OCR bit number according to the provided @vdd
 * value. If conversion is not possible a negative errno value is returned.
 *
 * Depending on the @low_bits flag the function prefers low or high OCR bits
 * on boundary voltages. For example,
 * with @low_bits = true, 3300 mV translates to ilog2(MMC_VDD_32_33);
 * with @low_bits = false, 3300 mV translates to ilog2(MMC_VDD_33_34);
 *
 * Any value in the [1951:1999] range translates to the ilog2(MMC_VDD_20_21).
 */
static int mmc_vdd_to_ocrbitnum(int vdd, bool low_bits)
{
        const int max_bit = ilog2(MMC_VDD_35_36);
        int bit;

        if (vdd < 1650 || vdd > 3600)
                return -EINVAL;

        if (vdd >= 1650 && vdd <= 1950)
                return ilog2(MMC_VDD_165_195);

        if (low_bits)
                vdd -= 1;

        /* Base 2000 mV, step 100 mV, bit's base 8. */
        bit = (vdd - 2000) / 100 + 8;
        if (bit > max_bit)
                return max_bit;
        return bit;
}

/**
 * mmc_vddrange_to_ocrmask - Convert a voltage range to the OCR mask
 * @vdd_min: minimum voltage value (mV)
 * @vdd_max: maximum voltage value (mV)
 *
 * This function returns the OCR mask bits according to the provided @vdd_min
 * and @vdd_max values. If conversion is not possible the function returns 0.
 *
 * Notes wrt boundary cases:
 * This function sets the OCR bits for all boundary voltages, for example
 * [3300:3400] range is translated to MMC_VDD_32_33 | MMC_VDD_33_34 |
 * MMC_VDD_34_35 mask.
 */
u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)
{
        u32 mask = 0;

        if (vdd_max < vdd_min)
                return 0;

        /* Prefer high bits for the boundary vdd_max values. */
        vdd_max = mmc_vdd_to_ocrbitnum(vdd_max, false);
        if (vdd_max < 0)
                return 0;

        /* Prefer low bits for the boundary vdd_min values. */
        vdd_min = mmc_vdd_to_ocrbitnum(vdd_min, true);
        if (vdd_min < 0)
                return 0;

        /* Fill the mask, from max bit to min bit. */
        while (vdd_max >= vdd_min)
                mask |= 1 << vdd_max--;

        return mask;
}
EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);

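/*
 * Worked example (illustrative): for vdd = 3300 mV with low_bits = false,
 * bit = (3300 - 2000) / 100 + 8 = 21 = ilog2(MMC_VDD_33_34); with
 * low_bits = true, vdd is first decremented to 3299, giving bit 20 =
 * ilog2(MMC_VDD_32_33). Hence the [3300:3400] range in the kerneldoc
 * above fills bits 20..22 of the OCR mask.
 */
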
#ifdef CONFIG_OF

/**
 * mmc_of_parse_voltage - return mask of supported voltages
 * @np: The device node to be parsed.
 * @mask: mask of voltages available for MMC/SD/SDIO
 *
 * 1. Return zero on success.
 * 2. Return negative errno: voltage-range is invalid.
 */
int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
{
        const u32 *voltage_ranges;
        int num_ranges, i;

        voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
        num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
        if (!voltage_ranges || !num_ranges) {
                pr_info("%s: voltage-ranges unspecified\n", np->full_name);
                return -EINVAL;
        }

        for (i = 0; i < num_ranges; i++) {
                const int j = i * 2;
                u32 ocr_mask;

                ocr_mask = mmc_vddrange_to_ocrmask(
                                be32_to_cpu(voltage_ranges[j]),
                                be32_to_cpu(voltage_ranges[j + 1]));
                if (!ocr_mask) {
                        pr_err("%s: voltage-range #%d is invalid\n",
                                np->full_name, i);
                        return -EINVAL;
                }
                *mask |= ocr_mask;
        }

        return 0;
}
EXPORT_SYMBOL(mmc_of_parse_voltage);

#endif /* CONFIG_OF */

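/*
 * Illustrative device-tree snippet (not part of this file): the
 * "voltage-ranges" property parsed above is a list of (min, max)
 * millivolt pairs, so a slot supplying 3.2V-3.4V could carry:
 *
 *      mmc@0 {
 *              voltage-ranges = <3200 3400>;
 *      };
 */
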
static int mmc_of_get_func_num(struct device_node *node)
{
        u32 reg;
        int ret;

        ret = of_property_read_u32(node, "reg", &reg);
        if (ret < 0)
                return ret;

        return reg;
}

struct device_node *mmc_of_find_child_device(struct mmc_host *host,
                unsigned func_num)
{
        struct device_node *node;

        if (!host->parent || !host->parent->of_node)
                return NULL;

        for_each_child_of_node(host->parent->of_node, node) {
                if (mmc_of_get_func_num(node) == func_num)
                        return node;
        }

        return NULL;
}

#ifdef CONFIG_REGULATOR

/**
 * mmc_regulator_get_ocrmask - return mask of supported voltages
 * @supply: regulator to use
 *
 * This returns either a negative errno, or a mask of voltages that
 * can be provided to MMC/SD/SDIO devices using the specified voltage
 * regulator. This would normally be called before registering the
 * MMC host adapter.
 */
int mmc_regulator_get_ocrmask(struct regulator *supply)
{
        int result = 0;
        int count;
        int i;
        int vdd_uV;
        int vdd_mV;

        count = regulator_count_voltages(supply);
        if (count < 0)
                return count;

        for (i = 0; i < count; i++) {
                vdd_uV = regulator_list_voltage(supply, i);
                if (vdd_uV <= 0)
                        continue;

                vdd_mV = vdd_uV / 1000;
                result |= mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
        }

        if (!result) {
                vdd_uV = regulator_get_voltage(supply);
                if (vdd_uV <= 0)
                        return vdd_uV;

                vdd_mV = vdd_uV / 1000;
                result = mmc_vddrange_to_ocrmask(vdd_mV, vdd_mV);
        }

        return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);

/**
 * mmc_regulator_set_ocr - set regulator to match host->ios voltage
 * @mmc: the host to regulate
 * @supply: regulator to use
 * @vdd_bit: zero for power off, else a bit number (host->ios.vdd)
 *
 * Returns zero on success, else negative errno.
 *
 * MMC host drivers may use this to enable or disable a regulator using
 * a particular supply voltage. This would normally be called from the
 * set_ios() method.
 */
int mmc_regulator_set_ocr(struct mmc_host *mmc,
                        struct regulator *supply,
                        unsigned short vdd_bit)
{
        int result = 0;
        int min_uV, max_uV;

        if (vdd_bit) {
                int tmp;

                /*
                 * REVISIT mmc_vddrange_to_ocrmask() may have set some
                 * bits this regulator doesn't quite support ... don't
                 * be too picky, most cards and regulators are OK with
                 * a 0.1V range goof (it's a small error percentage).
                 */
                tmp = vdd_bit - ilog2(MMC_VDD_165_195);
                if (tmp == 0) {
                        min_uV = 1650 * 1000;
                        max_uV = 1950 * 1000;
                } else {
                        min_uV = 1900 * 1000 + tmp * 100 * 1000;
                        max_uV = min_uV + 100 * 1000;
                }

                result = regulator_set_voltage(supply, min_uV, max_uV);
                if (result == 0 && !mmc->regulator_enabled) {
                        result = regulator_enable(supply);
                        if (!result)
                                mmc->regulator_enabled = true;
                }
        } else if (mmc->regulator_enabled) {
                result = regulator_disable(supply);
                if (result == 0)
                        mmc->regulator_enabled = false;
        }

        if (result)
                dev_err(mmc_dev(mmc),
                        "could not set regulator OCR (%d)\n", result);
        return result;
}
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);

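/*
 * Worked example (illustrative): for vdd_bit = ilog2(MMC_VDD_32_33) = 20,
 * tmp = 20 - 7 = 13, so the requested window is min_uV = 1900000 +
 * 13 * 100000 = 3200000 (3.2V) and max_uV = 3300000 (3.3V) - i.e. the
 * regulator is asked for exactly the 3.2-3.3V band that OCR bit 20 names.
 */
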
#endif /* CONFIG_REGULATOR */

int mmc_regulator_get_supply(struct mmc_host *mmc)
{
        struct device *dev = mmc_dev(mmc);
        int ret;

        mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc");
        mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");

        if (IS_ERR(mmc->supply.vmmc)) {
                if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                dev_info(dev, "No vmmc regulator found\n");
        } else {
                ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc);
                if (ret > 0)
                        mmc->ocr_avail = ret;
                else
                        dev_warn(dev, "Failed getting OCR mask: %d\n", ret);
        }

        if (IS_ERR(mmc->supply.vqmmc)) {
                if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                dev_info(dev, "No vqmmc regulator found\n");
        }

        return 0;
}
EXPORT_SYMBOL_GPL(mmc_regulator_get_supply);

/*
 * Mask off any voltages we don't support and select
 * the lowest voltage
 */
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)
{
        int bit;

        /*
         * Sanity check the voltages that the card claims to
         * support.
         */
        if (ocr & 0x7F) {
                dev_warn(mmc_dev(host),
                "card claims to support voltages below defined range\n");
                ocr &= ~0x7F;
        }

        ocr &= host->ocr_avail;
        if (!ocr) {
                dev_warn(mmc_dev(host), "no support for card's volts\n");
                return 0;
        }

        if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) {
                bit = ffs(ocr) - 1;
                ocr &= 3 << bit;
                mmc_power_cycle(host, ocr);
        } else {
                bit = fls(ocr) - 1;
                ocr &= 3 << bit;
                if (bit != host->ios.vdd)
                        dev_warn(mmc_dev(host), "exceeding card's volts\n");
        }

        return ocr;
}

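/*
 * Illustrative example (not from the original source): if the masked ocr
 * has bits 20 and 21 set (3.2-3.4V) and the host lacks
 * MMC_CAP2_FULL_PWR_CYCLE, then bit = fls(ocr) - 1 = 21 and
 * ocr &= 3 << 21, so only the highest window (bit 21, 3.3-3.4V) survives.
 */
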
int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
{
        int err = 0;
        int old_signal_voltage = host->ios.signal_voltage;

        host->ios.signal_voltage = signal_voltage;
        if (host->ops->start_signal_voltage_switch) {
                mmc_host_clk_hold(host);
                err = host->ops->start_signal_voltage_switch(host, &host->ios);
                mmc_host_clk_release(host);
        }

        if (err)
                host->ios.signal_voltage = old_signal_voltage;

        return err;
}

int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
{
        struct mmc_command cmd = {0};
        int err = 0;
        u32 clock;

        BUG_ON(!host);

        /*
         * Send CMD11 only if the request is to switch the card to
         * 1.8V signalling.
         */
        if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
                return __mmc_set_signal_voltage(host, signal_voltage);

        /*
         * If we cannot switch voltages, return failure so the caller
         * can continue without UHS mode
         */
        if (!host->ops->start_signal_voltage_switch)
                return -EPERM;
        if (!host->ops->card_busy)
                pr_warn("%s: cannot verify signal voltage switch\n",
                        mmc_hostname(host));

        mmc_host_clk_hold(host);

        cmd.opcode = SD_SWITCH_VOLTAGE;
        cmd.arg = 0;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

        err = mmc_wait_for_cmd(host, &cmd, 0);
        if (err)
                goto err_command;

        if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
                err = -EIO;
                goto err_command;
        }
        /*
         * The card should drive cmd and dat[0:3] low immediately
         * after the response of cmd11, but wait 1 ms to be sure
         */
        mmc_delay(1);
        if (host->ops->card_busy && !host->ops->card_busy(host)) {
                err = -EAGAIN;
                goto power_cycle;
        }
        /*
         * During a signal voltage level switch, the clock must be gated
         * for 5 ms according to the SD spec
         */
        clock = host->ios.clock;
        host->ios.clock = 0;
        mmc_set_ios(host);

        if (__mmc_set_signal_voltage(host, signal_voltage)) {
                /*
                 * Voltages may not have been switched, but we've already
                 * sent CMD11, so a power cycle is required anyway
                 */
                err = -EAGAIN;
                goto power_cycle;
        }

        /* Keep clock gated for at least 5 ms */
        mmc_delay(5);
        host->ios.clock = clock;
        mmc_set_ios(host);

        /* Wait for at least 1 ms according to spec */
        mmc_delay(1);

        /*
         * Failure to switch is indicated by the card holding
         * dat[0:3] low
         */
        if (host->ops->card_busy && host->ops->card_busy(host))
                err = -EAGAIN;

power_cycle:
        if (err) {
                pr_debug("%s: Signal voltage switch failed, "
                        "power cycling card\n", mmc_hostname(host));
                mmc_power_cycle(host, ocr);
        }

err_command:
        mmc_host_clk_release(host);

        return err;
}

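/*
 * Summary of the CMD11 sequence implemented above (per the SD spec, as an
 * aid to the reader): send CMD11 -> card drives CMD/DAT[0:3] low -> gate
 * the clock for >= 5 ms -> switch the host signalling to 1.8V -> ungate
 * the clock -> after >= 1 ms the card must release DAT[0:3]; if it is
 * still driving them low, the switch failed and the card is power cycled.
 */
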
/*
 * Select timing parameters for host.
 */
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
        mmc_host_clk_hold(host);
        host->ios.timing = timing;
        mmc_set_ios(host);
        mmc_host_clk_release(host);
}

/*
 * Select appropriate driver type for host.
 */
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
        mmc_host_clk_hold(host);
        host->ios.drv_type = drv_type;
        mmc_set_ios(host);
        mmc_host_clk_release(host);
}

/*
 * Apply power to the MMC stack. This is a two-stage process.
 * First, we enable power to the card without the clock running.
 * We then wait a bit for the power to stabilise. Finally,
 * enable the bus drivers and clock to the card.
 *
 * We must _NOT_ enable the clock prior to power stabilising.
 *
 * If a host does all the power sequencing itself, ignore the
 * initial MMC_POWER_UP stage.
 */
void mmc_power_up(struct mmc_host *host, u32 ocr)
{
        if (host->ios.power_mode == MMC_POWER_ON)
                return;

        mmc_host_clk_hold(host);

        mmc_pwrseq_pre_power_on(host);

        host->ios.vdd = fls(ocr) - 1;
        host->ios.power_mode = MMC_POWER_UP;
        /* Set initial state and call mmc_set_ios */
        mmc_set_initial_state(host);

        /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */
        if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0)
                dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n");
        else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0)
                dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n");
        else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0)
                dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n");

        /*
         * This delay should be sufficient to allow the power supply
         * to reach the minimum voltage.
         */
        mmc_delay(10);

        mmc_pwrseq_post_power_on(host);

        host->ios.clock = host->f_init;

        host->ios.power_mode = MMC_POWER_ON;
        mmc_set_ios(host);

        /*
         * This delay must be at least 74 clock cycles, or 1 ms, or the
         * time required to reach a stable voltage.
         */
        mmc_delay(10);

        mmc_host_clk_release(host);
}

void mmc_power_off(struct mmc_host *host)
{
        if (host->ios.power_mode == MMC_POWER_OFF)
                return;

        mmc_host_clk_hold(host);

        mmc_pwrseq_power_off(host);

        host->ios.clock = 0;
        host->ios.vdd = 0;

        host->ios.power_mode = MMC_POWER_OFF;
        /* Set initial state and call mmc_set_ios */
        mmc_set_initial_state(host);

        /*
         * Some configurations, such as the 802.11 SDIO card in the OLPC
         * XO-1.5, require a short delay after poweroff before the card
         * can be successfully turned on again.
         */
        mmc_delay(1);

        mmc_host_clk_release(host);
}

void mmc_power_cycle(struct mmc_host *host, u32 ocr)
{
        mmc_power_off(host);
        /* Wait at least 1 ms according to SD spec */
        mmc_delay(1);
        mmc_power_up(host, ocr);
}

/*
 * Cleanup when the last reference to the bus operator is dropped.
 */
static void __mmc_release_bus(struct mmc_host *host)
{
        BUG_ON(!host);
        BUG_ON(host->bus_refs);
        BUG_ON(!host->bus_dead);

        host->bus_ops = NULL;
}

/*
 * Increase reference count of bus operator
 */
static inline void mmc_bus_get(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs++;
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Decrease reference count of bus operator and free it if
 * it is the last reference.
 */
static inline void mmc_bus_put(struct mmc_host *host)
{
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        host->bus_refs--;
        if ((host->bus_refs == 0) && host->bus_ops)
                __mmc_release_bus(host);
        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Assign a mmc bus handler to a host. Only one bus handler may control a
 * host at any given time.
 */
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)
{
        unsigned long flags;

        BUG_ON(!host);
        BUG_ON(!ops);

        WARN_ON(!host->claimed);

        spin_lock_irqsave(&host->lock, flags);

        BUG_ON(host->bus_ops);
        BUG_ON(host->bus_refs);

        host->bus_ops = ops;
        host->bus_refs = 1;
        host->bus_dead = 0;

        spin_unlock_irqrestore(&host->lock, flags);
}

/*
 * Remove the current bus handler from a host.
 */
void mmc_detach_bus(struct mmc_host *host)
{
        unsigned long flags;

        BUG_ON(!host);

        WARN_ON(!host->claimed);
        WARN_ON(!host->bus_ops);

        spin_lock_irqsave(&host->lock, flags);

        host->bus_dead = 1;

        spin_unlock_irqrestore(&host->lock, flags);

        mmc_bus_put(host);
}

static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
                                bool cd_irq)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&host->lock, flags);
        WARN_ON(host->removed);
        spin_unlock_irqrestore(&host->lock, flags);
#endif

        /*
         * If the device is configured as wakeup, we prevent a new sleep for
         * 5 s to give provision for user space to consume the event.
         */
        if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) &&
                device_can_wakeup(mmc_dev(host)))
                pm_wakeup_event(mmc_dev(host), 5000);

        host->detect_change = 1;
        mmc_schedule_delayed_work(&host->detect, delay);
}

/**
 * mmc_detect_change - process change of state on a MMC socket
 * @host: host which changed state.
 * @delay: optional delay to wait before detection (jiffies)
 *
 * MMC drivers should call this when they detect a card has been
 * inserted or removed. The MMC layer will confirm that any
 * present card is still functional, and initialize any newly
 * inserted.
 */
void mmc_detect_change(struct mmc_host *host, unsigned long delay)
{
        _mmc_detect_change(host, delay, true);
}
EXPORT_SYMBOL(mmc_detect_change);

void mmc_init_erase(struct mmc_card *card)
{
        unsigned int sz;

        if (is_power_of_2(card->erase_size))
                card->erase_shift = ffs(card->erase_size) - 1;
        else
                card->erase_shift = 0;

        /*
         * It is possible to erase an arbitrarily large area of an SD or MMC
         * card. That is not desirable because it can take a long time
         * (minutes) potentially delaying more important I/O, and also the
         * timeout calculations become increasingly over-estimated.
         * Consequently, 'pref_erase' is defined as a guide to limit erases
         * to that size and alignment.
         *
         * For SD cards that define Allocation Unit size, limit erases to one
         * Allocation Unit at a time. For MMC cards that define High Capacity
         * Erase Size, whether it is switched on or not, limit to that size.
         * Otherwise just have a stab at a good value. For modern cards it
         * will end up being 4MiB. Note that if the value is too small, it
         * can end up taking longer to erase.
         */
        if (mmc_card_sd(card) && card->ssr.au) {
                card->pref_erase = card->ssr.au;
                card->erase_shift = ffs(card->ssr.au) - 1;
        } else if (card->ext_csd.hc_erase_size) {
                card->pref_erase = card->ext_csd.hc_erase_size;
        } else if (card->erase_size) {
                sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
                if (sz < 128)
                        card->pref_erase = 512 * 1024 / 512;
                else if (sz < 512)
                        card->pref_erase = 1024 * 1024 / 512;
                else if (sz < 1024)
                        card->pref_erase = 2 * 1024 * 1024 / 512;
                else
                        card->pref_erase = 4 * 1024 * 1024 / 512;
                if (card->pref_erase < card->erase_size)
                        card->pref_erase = card->erase_size;
                else {
                        sz = card->pref_erase % card->erase_size;
                        if (sz)
                                card->pref_erase += card->erase_size - sz;
                }
        } else
                card->pref_erase = 0;
}

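/*
 * Worked example (illustrative figures): the 'sz' computed above is the
 * card capacity in MiB, because capacity is in 512-byte sectors after the
 * read_blkbits shift and 2048 such sectors make 1 MiB. A hypothetical
 * 4 GiB legacy card gives sz = 4096, so pref_erase starts at 4 MiB
 * (4 * 1024 * 1024 / 512 = 8192 sectors) and is then rounded up to a
 * multiple of erase_size if needed.
 */
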
static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
                                          unsigned int arg, unsigned int qty)
{
        unsigned int erase_timeout;

        if (arg == MMC_DISCARD_ARG ||
            (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) {
                erase_timeout = card->ext_csd.trim_timeout;
        } else if (card->ext_csd.erase_group_def & 1) {
                /* High Capacity Erase Group Size uses HC timeouts */
                if (arg == MMC_TRIM_ARG)
                        erase_timeout = card->ext_csd.trim_timeout;
                else
                        erase_timeout = card->ext_csd.hc_erase_timeout;
        } else {
                /* CSD Erase Group Size uses write timeout */
                unsigned int mult = (10 << card->csd.r2w_factor);
                unsigned int timeout_clks = card->csd.tacc_clks * mult;
                unsigned int timeout_us;

                /* Avoid overflow: e.g. tacc_ns=80000000 mult=1280 */
                if (card->csd.tacc_ns < 1000000)
                        timeout_us = (card->csd.tacc_ns * mult) / 1000;
                else
                        timeout_us = (card->csd.tacc_ns / 1000) * mult;

                /*
                 * ios.clock is only a target. The real clock rate might be
                 * less but not that much less, so fudge it by multiplying by 2.
                 */
                timeout_clks <<= 1;
                timeout_us += (timeout_clks * 1000) /
                              (mmc_host_clk_rate(card->host) / 1000);

                erase_timeout = timeout_us / 1000;

                /*
                 * Theoretically, the calculation could underflow so round up
                 * to 1ms in that case.
                 */
                if (!erase_timeout)
                        erase_timeout = 1;
        }

        /* Multiplier for secure operations */
        if (arg & MMC_SECURE_ARGS) {
                if (arg == MMC_SECURE_ERASE_ARG)
                        erase_timeout *= card->ext_csd.sec_erase_mult;
                else
                        erase_timeout *= card->ext_csd.sec_trim_mult;
        }

        erase_timeout *= qty;

        /*
         * Ensure at least a 1 second timeout for SPI as per
         * 'mmc_set_data_timeout()'
         */
        if (mmc_host_is_spi(card->host) && erase_timeout < 1000)
                erase_timeout = 1000;

        return erase_timeout;
}

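/*
 * Worked example (using the figures from the overflow comment above):
 * tacc_ns = 80000000 and mult = 1280 give
 * timeout_us = (80000000 / 1000) * 1280 = 102400000, i.e. 102.4 s per
 * erase group before the clock-derived term is added. The direct
 * (tacc_ns * mult) product would overflow 32 bits, which is why the
 * division is done first for large tacc_ns.
 */
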
static unsigned int mmc_sd_erase_timeout(struct mmc_card *card,
                                         unsigned int arg,
                                         unsigned int qty)
{
        unsigned int erase_timeout;

        if (card->ssr.erase_timeout) {
                /* Erase timeout specified in SD Status Register (SSR) */
                erase_timeout = card->ssr.erase_timeout * qty +
                                card->ssr.erase_offset;
        } else {
                /*
                 * Erase timeout not specified in SD Status Register (SSR) so
                 * use 250ms per write block.
                 */
                erase_timeout = 250 * qty;
        }

        /* Must not be less than 1 second */
        if (erase_timeout < 1000)
                erase_timeout = 1000;

        return erase_timeout;
}

static unsigned int mmc_erase_timeout(struct mmc_card *card,
                                      unsigned int arg,
                                      unsigned int qty)
{
        if (mmc_card_sd(card))
                return mmc_sd_erase_timeout(card, arg, qty);
        else
                return mmc_mmc_erase_timeout(card, arg, qty);
}

static int mmc_do_erase(struct mmc_card *card, unsigned int from,
                        unsigned int to, unsigned int arg)
{
        struct mmc_command cmd = {0};
        unsigned int qty = 0;
        unsigned long timeout;
        int err;

        mmc_retune_hold(card->host);

        /*
         * qty is used to calculate the erase timeout which depends on how many
         * erase groups (or allocation units in SD terminology) are affected.
         * We count erasing part of an erase group as one erase group.
         * For SD, the allocation units are always a power of 2. For MMC, the
         * erase group size is almost certainly also a power of 2, but the
         * JEDEC standard does not seem to insist on that, so we fall back to
         * division in that case. SD may not specify an allocation unit size,
         * in which case the timeout is based on the number of write blocks.
         *
         * Note that the timeout for secure trim 2 will only be correct if the
         * number of erase groups specified is the same as the total of all
         * preceding secure trim 1 commands. Since the power may have been
         * lost since the secure trim 1 commands occurred, it is generally
         * impossible to calculate the secure trim 2 timeout correctly.
         */
        if (card->erase_shift)
                qty += ((to >> card->erase_shift) -
                        (from >> card->erase_shift)) + 1;
        else if (mmc_card_sd(card))
                qty += to - from + 1;
        else
                qty += ((to / card->erase_size) -
                        (from / card->erase_size)) + 1;

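        /*
         * Worked example (illustrative figures): with erase_size = 1024
         * sectors (erase_shift = 10), erasing sectors 0..2047 touches two
         * erase groups, so qty = (2047 >> 10) - (0 >> 10) + 1 = 2.
         */
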
        if (!mmc_card_blockaddr(card)) {
                from <<= 9;
                to <<= 9;
        }

        if (mmc_card_sd(card))
                cmd.opcode = SD_ERASE_WR_BLK_START;
        else
                cmd.opcode = MMC_ERASE_GROUP_START;
        cmd.arg = from;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_err("mmc_erase: group start error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EIO;
                goto out;
        }

        memset(&cmd, 0, sizeof(struct mmc_command));
        if (mmc_card_sd(card))
                cmd.opcode = SD_ERASE_WR_BLK_END;
        else
                cmd.opcode = MMC_ERASE_GROUP_END;
        cmd.arg = to;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_err("mmc_erase: group end error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EIO;
                goto out;
        }

        memset(&cmd, 0, sizeof(struct mmc_command));
        cmd.opcode = MMC_ERASE;
        cmd.arg = arg;
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);
        err = mmc_wait_for_cmd(card->host, &cmd, 0);
        if (err) {
                pr_err("mmc_erase: erase error %d, status %#x\n",
                       err, cmd.resp[0]);
                err = -EIO;
                goto out;
        }

        if (mmc_host_is_spi(card->host))
                goto out;

        timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
        do {
                memset(&cmd, 0, sizeof(struct mmc_command));
                cmd.opcode = MMC_SEND_STATUS;
                cmd.arg = card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
                /* Do not retry else we can't see errors */
                err = mmc_wait_for_cmd(card->host, &cmd, 0);
                if (err || (cmd.resp[0] & 0xFDF92000)) {
                        pr_err("error %d requesting status %#x\n",
                               err, cmd.resp[0]);
                        err = -EIO;
                        goto out;
                }

                /*
                 * Timeout if the device never becomes ready for data and
                 * never leaves the program state.
                 */
                if (time_after(jiffies, timeout)) {
                        pr_err("%s: Card stuck in programming state! %s\n",
                               mmc_hostname(card->host), __func__);
                        err = -EIO;
                        goto out;
                }

        } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
                 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));
out:
        mmc_retune_release(card->host);
        return err;
}

/**
 * mmc_erase - erase sectors.
 * @card: card to erase
 * @from: first sector to erase
 * @nr: number of sectors to erase
 * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
 *
 * Caller must claim host before calling this function.
 */
int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
              unsigned int arg)
{
        unsigned int rem, to = from + nr;

        if (!(card->host->caps & MMC_CAP_ERASE) ||
            !(card->csd.cmdclass & CCC_ERASE))
                return -EOPNOTSUPP;

        if (!card->erase_size)
                return -EOPNOTSUPP;

        if (mmc_card_sd(card) && arg != MMC_ERASE_ARG)
                return -EOPNOTSUPP;

        if ((arg & MMC_SECURE_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN))
                return -EOPNOTSUPP;

        if ((arg & MMC_TRIM_ARGS) &&
            !(card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN))
                return -EOPNOTSUPP;

        if (arg == MMC_SECURE_ERASE_ARG) {
                if (from % card->erase_size || nr % card->erase_size)
                        return -EINVAL;
        }

        if (arg == MMC_ERASE_ARG) {
                rem = from % card->erase_size;
                if (rem) {
                        rem = card->erase_size - rem;
                        from += rem;
                        if (nr > rem)
                                nr -= rem;
                        else
                                return 0;
                }
                rem = nr % card->erase_size;
                if (rem)
                        nr -= rem;
        }

        if (nr == 0)
                return 0;

        to = from + nr;

        if (to <= from)
                return -EINVAL;

        /* 'from' and 'to' are inclusive */
        to -= 1;

        return mmc_do_erase(card, from, to, arg);
}
EXPORT_SYMBOL(mmc_erase);

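/*
 * Worked example (illustrative figures) for the MMC_ERASE_ARG alignment
 * above: with erase_size = 1024 sectors, mmc_erase(card, 100, 3000, ...)
 * first rounds 'from' up to 1024 (trimming 924 sectors from 'nr'), then
 * rounds the remaining 2076 sectors down to 2048, so the erase covers
 * sectors 1024..3071 inclusive.
 */
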
int mmc_can_erase(struct mmc_card *card)
{
        if ((card->host->caps & MMC_CAP_ERASE) &&
            (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_erase);

int mmc_can_trim(struct mmc_card *card)
{
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_trim);

int mmc_can_discard(struct mmc_card *card)
{
        /*
         * As there is no way to detect a discard support bit at v4.5,
         * use the s/w feature support field instead.
         */
        if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_discard);

int mmc_can_sanitize(struct mmc_card *card)
{
        if (!mmc_can_trim(card) && !mmc_can_erase(card))
                return 0;
        if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_sanitize);

int mmc_can_secure_erase_trim(struct mmc_card *card)
{
        if ((card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) &&
            !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
                return 1;
        return 0;
}
EXPORT_SYMBOL(mmc_can_secure_erase_trim);

int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
                            unsigned int nr)
{
        if (!card->erase_size)
                return 0;
        if (from % card->erase_size || nr % card->erase_size)
                return 0;
        return 1;
}
EXPORT_SYMBOL(mmc_erase_group_aligned);

static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
                                            unsigned int arg)
{
        struct mmc_host *host = card->host;
        unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
        unsigned int last_timeout = 0;

        if (card->erase_shift)
                max_qty = UINT_MAX >> card->erase_shift;
        else if (mmc_card_sd(card))
                max_qty = UINT_MAX;
        else
                max_qty = UINT_MAX / card->erase_size;

        /* Find the largest qty with an OK timeout */
        do {
                y = 0;
                for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
                        timeout = mmc_erase_timeout(card, arg, qty + x);
                        if (timeout > host->max_busy_timeout)
                                break;
                        if (timeout < last_timeout)
                                break;
                        last_timeout = timeout;
                        y = x;
                }
                qty += y;
        } while (y);

        if (!qty)
                return 0;

        if (qty == 1)
                return 1;

        /* Convert qty to sectors */
        if (card->erase_shift)
                max_discard = --qty << card->erase_shift;
        else if (mmc_card_sd(card))
                max_discard = qty;
        else
                max_discard = --qty * card->erase_size;

        return max_discard;
}

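/*
 * The loop above is effectively an exponential search: each inner pass
 * doubles a step 'x' until adding it to 'qty' would exceed the host's
 * maximum busy timeout, then restarts from the qty reached so far. An
 * illustrative run with a (hypothetical) linear timeout of 10 ms per
 * group and max_busy_timeout = 100 ms accepts x = 1, 2, 4, 8 on the
 * first pass (qty 8 at 80 ms), then x = 1, 2 (qty 10 at 100 ms); no
 * further step fits, so the search ends with qty = 10.
 */
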
unsigned int mmc_calc_max_discard(struct mmc_card *card)
{
        struct mmc_host *host = card->host;
        unsigned int max_discard, max_trim;

        if (!host->max_busy_timeout)
                return UINT_MAX;

        /*
         * Without erase_group_def set, MMC erase timeout depends on clock
         * frequency which can change. In that case, the best choice is
         * just the preferred erase size.
         */
        if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
                return card->pref_erase;

        max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
        if (mmc_can_trim(card)) {
                max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
                if (max_trim < max_discard)
                        max_discard = max_trim;
        } else if (max_discard < card->erase_size) {
                max_discard = 0;
        }
        pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
                 mmc_hostname(host), max_discard, host->max_busy_timeout);
        return max_discard;
}
EXPORT_SYMBOL(mmc_calc_max_discard);

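/*
 * Sketch of typical use (simplified, not verbatim from the MMC block
 * driver; the "foo_" name is assumed and <linux/blkdev.h> would be
 * needed): the result bounds the discard requests the block layer may
 * issue, so a single discard never exceeds the host's busy timeout.
 */
static void foo_setup_discard(struct request_queue *q, struct mmc_card *card)
{
        unsigned int max_discard = mmc_calc_max_discard(card);

        if (max_discard)
                blk_queue_max_discard_sectors(q, max_discard);
}
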
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
        struct mmc_command cmd = {0};

        if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))
                return 0;

        cmd.opcode = MMC_SET_BLOCKLEN;
        cmd.arg = blocklen;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blocklen);

int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount,
                       bool is_rel_write)
{
        struct mmc_command cmd = {0};

        cmd.opcode = MMC_SET_BLOCK_COUNT;
        cmd.arg = blockcount & 0x0000FFFF;
        if (is_rel_write)
                cmd.arg |= 1 << 31;
        cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        return mmc_wait_for_cmd(card->host, &cmd, 5);
}
EXPORT_SYMBOL(mmc_set_blockcount);

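/*
 * Worked example (illustrative): mmc_set_blockcount(card, 8, true)
 * issues CMD23 with arg 0x80000008 - the low 16 bits carry the block
 * count and bit 31 marks the upcoming write as reliable.
 */
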
static void mmc_hw_reset_for_init(struct mmc_host *host)
{
        if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
                return;
        mmc_host_clk_hold(host);
        host->ops->hw_reset(host);
        mmc_host_clk_release(host);
}

int mmc_hw_reset(struct mmc_host *host)
{
        int ret;

        if (!host->card)
                return -EINVAL;

        mmc_bus_get(host);
        if (!host->bus_ops || host->bus_dead || !host->bus_ops->reset) {
                mmc_bus_put(host);
                return -EOPNOTSUPP;
        }

        ret = host->bus_ops->reset(host);
        mmc_bus_put(host);

        pr_warn("%s: tried to reset card\n", mmc_hostname(host));

        return ret;
}
EXPORT_SYMBOL(mmc_hw_reset);

static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
        host->f_init = freq;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: trying to init card at %u Hz\n",
                mmc_hostname(host), __func__, host->f_init);
#endif
        mmc_power_up(host, host->ocr_avail);

        /*
         * Some eMMCs (with VCCQ always on) may not be reset after power up, so
         * do a hardware reset if possible.
         */
        mmc_hw_reset_for_init(host);

        /*
         * sdio_reset sends CMD52 to reset card. Since we do not know
         * if the card is being re-initialized, just send it. CMD52
         * should be ignored by SD/eMMC cards.
         */
        sdio_reset(host);
        mmc_go_idle(host);

        mmc_send_if_cond(host, host->ocr_avail);

        /* Order's important: probe SDIO, then SD, then MMC */
        if (!mmc_attach_sdio(host))
                return 0;
        if (!mmc_attach_sd(host))
                return 0;
        if (!mmc_attach_mmc(host))
                return 0;

        mmc_power_off(host);
        return -EIO;
}

int _mmc_detect_card_removed(struct mmc_host *host)
{
        int ret;

        if (host->caps & MMC_CAP_NONREMOVABLE)
                return 0;

        if (!host->card || mmc_card_removed(host->card))
                return 1;

        ret = host->bus_ops->alive(host);

        /*
         * Card detect status and alive check may be out of sync if card is
         * removed slowly, when card detect switch changes while card/slot
         * pads are still contacted in hardware (refer to "SD Card Mechanical
         * Addendum, Appendix C: Card Detection Switch"). So reschedule a
         * detect work 200ms later for this case.
         */
        if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) {
                mmc_detect_change(host, msecs_to_jiffies(200));
                pr_debug("%s: card removed too slowly\n", mmc_hostname(host));
        }

        if (ret) {
                mmc_card_set_removed(host->card);
                pr_debug("%s: card remove detected\n", mmc_hostname(host));
        }

        return ret;
}

int mmc_detect_card_removed(struct mmc_host *host)
{
        struct mmc_card *card = host->card;
        int ret;

        WARN_ON(!host->claimed);

        if (!card)
                return 1;

        ret = mmc_card_removed(card);
        /*
         * The card will be considered unchanged unless we have been asked to
         * detect a change or host requires polling to provide card detection.
         */
        if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
                return ret;

        host->detect_change = 0;
        if (!ret) {
                ret = _mmc_detect_card_removed(host);
                if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) {
                        /*
                         * Schedule a detect work as soon as possible to let a
                         * rescan handle the card removal.
                         */
                        cancel_delayed_work(&host->detect);
                        _mmc_detect_change(host, 0, false);
                }
        }

        return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);

void mmc_rescan(struct work_struct *work)
{
        struct mmc_host *host =
                container_of(work, struct mmc_host, detect.work);
        int i;

        if (host->trigger_card_event && host->ops->card_event) {
                host->ops->card_event(host);
                host->trigger_card_event = false;
        }

        if (host->rescan_disable)
                return;

        /* If there is a non-removable card registered, only scan once */
        if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
                return;
        host->rescan_entered = 1;

        mmc_bus_get(host);

        /*
         * if there is a _removable_ card registered, check whether it is
         * still present
         */
        if (host->bus_ops && !host->bus_dead
            && !(host->caps & MMC_CAP_NONREMOVABLE))
                host->bus_ops->detect(host);

        host->detect_change = 0;

        /*
         * Let mmc_bus_put() free the bus/bus_ops if we've found that
         * the card is no longer present.
         */
        mmc_bus_put(host);
        mmc_bus_get(host);

        /* if there still is a card present, stop here */
        if (host->bus_ops != NULL) {
                mmc_bus_put(host);
                goto out;
        }

        /*
         * Only we can add a new handler, so it's safe to
         * release the lock here.
         */
        mmc_bus_put(host);

        if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
            host->ops->get_cd(host) == 0) {
                mmc_claim_host(host);
                mmc_power_off(host);
                mmc_release_host(host);
                goto out;
        }

        mmc_claim_host(host);
        for (i = 0; i < ARRAY_SIZE(freqs); i++) {
                if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
                        break;
                if (freqs[i] <= host->f_min)
                        break;
        }
        mmc_release_host(host);

 out:
        if (host->caps & MMC_CAP_NEEDS_POLL)
                mmc_schedule_delayed_work(&host->detect, HZ);
}

void mmc_start_host(struct mmc_host *host)
{
        host->f_init = max(freqs[0], host->f_min);
        host->rescan_disable = 0;
        host->ios.power_mode = MMC_POWER_UNDEFINED;
        if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
                mmc_power_off(host);
        else
                mmc_power_up(host, host->ocr_avail);
        mmc_gpiod_request_cd_irq(host);
        _mmc_detect_change(host, 0, false);
}

void mmc_stop_host(struct mmc_host *host)
{
#ifdef CONFIG_MMC_DEBUG
        unsigned long flags;
        spin_lock_irqsave(&host->lock, flags);
        host->removed = 1;
        spin_unlock_irqrestore(&host->lock, flags);
#endif
        if (host->slot.cd_irq >= 0)
                disable_irq(host->slot.cd_irq);

        host->rescan_disable = 1;
        cancel_delayed_work_sync(&host->detect);
        mmc_flush_scheduled_work();

        /* clear pm flags now and let card drivers set them as needed */
        host->pm_flags = 0;

        mmc_bus_get(host);
        if (host->bus_ops && !host->bus_dead) {
                /* Calling bus_ops->remove() with a claimed host can deadlock */
                host->bus_ops->remove(host);
                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_power_off(host);
                mmc_release_host(host);
                mmc_bus_put(host);
                return;
        }
        mmc_bus_put(host);

        BUG_ON(host->card);

        mmc_power_off(host);
}

int mmc_power_save_host(struct mmc_host *host)
{
        int ret = 0;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
#endif

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        if (host->bus_ops->power_save)
                ret = host->bus_ops->power_save(host);

        mmc_bus_put(host);

        mmc_power_off(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_save_host);

int mmc_power_restore_host(struct mmc_host *host)
{
        int ret;

#ifdef CONFIG_MMC_DEBUG
        pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
#endif

        mmc_bus_get(host);

        if (!host->bus_ops || host->bus_dead) {
                mmc_bus_put(host);
                return -EINVAL;
        }

        mmc_power_up(host, host->card->ocr);
        ret = host->bus_ops->power_restore(host);

        mmc_bus_put(host);

        return ret;
}
EXPORT_SYMBOL(mmc_power_restore_host);

/*
 * Flush the cache to the non-volatile storage.
 */
int mmc_flush_cache(struct mmc_card *card)
{
        int err = 0;

        if (mmc_card_mmc(card) &&
            (card->ext_csd.cache_size > 0) &&
            (card->ext_csd.cache_ctrl & 1)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_FLUSH_CACHE, 1, 0);
                if (err)
                        pr_err("%s: cache flush error %d\n",
                               mmc_hostname(card->host), err);
        }

        return err;
}
EXPORT_SYMBOL(mmc_flush_cache);

#ifdef CONFIG_PM

/*
 * Do the card removal on suspend if the card is assumed removable. Do that
 * in a pm notifier while userspace isn't yet frozen, so we will be able to
 * sync the card.
 */
int mmc_pm_notify(struct notifier_block *notify_block,
                  unsigned long mode, void *unused)
{
        struct mmc_host *host = container_of(
                notify_block, struct mmc_host, pm_notify);
        unsigned long flags;
        int err = 0;

        switch (mode) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
        case PM_RESTORE_PREPARE:
                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 1;
                spin_unlock_irqrestore(&host->lock, flags);
                cancel_delayed_work_sync(&host->detect);

                if (!host->bus_ops)
                        break;

                /* Validate prerequisites for suspend */
                if (host->bus_ops->pre_suspend)
                        err = host->bus_ops->pre_suspend(host);
                if (!err)
                        break;

                /* Calling bus_ops->remove() with a claimed host can deadlock */
                host->bus_ops->remove(host);
                mmc_claim_host(host);
                mmc_detach_bus(host);
                mmc_power_off(host);
                mmc_release_host(host);
                host->pm_flags = 0;
                break;

        case PM_POST_SUSPEND:
        case PM_POST_HIBERNATION:
        case PM_POST_RESTORE:

                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 0;
                spin_unlock_irqrestore(&host->lock, flags);
                _mmc_detect_change(host, 0, false);

        }

        return 0;
}
#endif

/**
 * mmc_init_context_info() - init synchronization context
 * @host: mmc host
 *
 * Init struct context_info needed to implement asynchronous
 * request mechanism, used by mmc core, host driver and mmc requests
 * supplier.
 */
void mmc_init_context_info(struct mmc_host *host)
{
        spin_lock_init(&host->context_info.lock);
        host->context_info.is_new_req = false;
        host->context_info.is_done_rcv = false;
        host->context_info.is_waiting_last_req = false;
        init_waitqueue_head(&host->context_info.wait);
}

static int __init mmc_init(void)
{
        int ret;

        workqueue = alloc_ordered_workqueue("kmmcd", 0);
        if (!workqueue)
                return -ENOMEM;

        ret = mmc_register_bus();
        if (ret)
                goto destroy_workqueue;

        ret = mmc_register_host_class();
        if (ret)
                goto unregister_bus;

        ret = sdio_register_bus();
        if (ret)
                goto unregister_host_class;

        return 0;

unregister_host_class:
        mmc_unregister_host_class();
unregister_bus:
        mmc_unregister_bus();
destroy_workqueue:
        destroy_workqueue(workqueue);

        return ret;
}

static void __exit mmc_exit(void)
{
        sdio_unregister_bus();
        mmc_unregister_host_class();
        mmc_unregister_bus();
        destroy_workqueue(workqueue);
}

subsys_initcall(mmc_init);
module_exit(mmc_exit);

MODULE_LICENSE("GPL");